From 0ebce81238e04363909c0ffd18902cb34d975a97 Mon Sep 17 00:00:00 2001 From: testisnullus Date: Wed, 15 Nov 2023 10:28:34 +0200 Subject: [PATCH 1/4] vendor --- go.mod | 18 +- go.sum | 69 +- .../github.com/emicklei/go-restful/.gitignore | 1 + .../emicklei/go-restful/.travis.yml | 9 +- .../github.com/emicklei/go-restful/CHANGES.md | 151 +- .../github.com/emicklei/go-restful/Makefile | 6 +- .../github.com/emicklei/go-restful/README.md | 39 +- .../emicklei/go-restful/container.go | 130 +- .../github.com/emicklei/go-restful/curly.go | 13 +- .../emicklei/go-restful/custom_verb.go | 29 + .../github.com/emicklei/go-restful/jsr311.go | 23 +- .../emicklei/go-restful/path_processor.go | 15 +- .../emicklei/go-restful/response.go | 11 +- .../github.com/emicklei/go-restful/route.go | 33 +- .../emicklei/go-restful/route_builder.go | 94 +- .../emicklei/go-restful/service_error.go | 11 +- .../emicklei/go-restful/web_service.go | 7 +- vendor/github.com/go-logr/logr/README.md | 4 + vendor/github.com/go-logr/logr/logr.go | 20 +- .../github.com/go-openapi/swag/.gitattributes | 2 + .../github.com/go-openapi/swag/.golangci.yml | 11 + vendor/github.com/go-openapi/swag/.travis.yml | 37 - vendor/github.com/go-openapi/swag/file.go | 33 + .../github.com/go-openapi/swag/post_go18.go | 1 + .../github.com/go-openapi/swag/post_go19.go | 1 + vendor/github.com/go-openapi/swag/pre_go18.go | 1 + vendor/github.com/go-openapi/swag/pre_go19.go | 1 + vendor/github.com/google/go-cmp/cmp/path.go | 2 +- .../google/go-cmp/cmp/report_slices.go | 202 +- .../mailru/easyjson/jlexer/lexer.go | 14 + vendor/github.com/openshift/api/LICENSE | 191 + ...rsion-operator_01_clusteroperator.crd.yaml | 137 + ...ersion-operator_01_clusterversion.crd.yaml | 359 ++ ...03_config-operator_01_operatorhub.crd.yaml | 83 + .../0000_03_config-operator_01_proxy.crd.yaml | 78 + ...0_10_config-operator_01_apiserver.crd.yaml | 177 + ...config-operator_01_authentication.crd.yaml | 101 + .../0000_10_config-operator_01_build.crd.yaml | 271 + ...000_10_config-operator_01_console.crd.yaml | 57 + .../0000_10_config-operator_01_dns.crd.yaml | 72 + ...10_config-operator_01_featuregate.crd.yaml | 63 + .../0000_10_config-operator_01_image.crd.yaml | 108 + ...ig-operator_01_imagecontentpolicy.crd.yaml | 68 + ...config-operator_01_infrastructure.crd.yaml | 461 ++ ...000_10_config-operator_01_ingress.crd.yaml | 274 + ...000_10_config-operator_01_network.crd.yaml | 163 + .../0000_10_config-operator_01_oauth.crd.yaml | 444 ++ ...000_10_config-operator_01_project.crd.yaml | 55 + ...0_10_config-operator_01_scheduler.crd.yaml | 68 + .../github.com/openshift/api/config/v1/doc.go | 8 + .../openshift/api/config/v1/register.go | 72 + .../openshift/api/config/v1/stringsource.go | 31 + .../openshift/api/config/v1/types.go | 400 ++ .../api/config/v1/types_apiserver.go | 211 + .../api/config/v1/types_authentication.go | 156 + .../openshift/api/config/v1/types_build.go | 121 + .../api/config/v1/types_cluster_operator.go | 203 + .../api/config/v1/types_cluster_version.go | 429 ++ .../openshift/api/config/v1/types_console.go | 69 + .../openshift/api/config/v1/types_dns.go | 92 + .../openshift/api/config/v1/types_feature.go | 225 + .../openshift/api/config/v1/types_image.go | 128 + .../config/v1/types_image_content_policy.go | 89 + .../api/config/v1/types_infrastructure.go | 706 +++ .../openshift/api/config/v1/types_ingress.go | 211 + .../openshift/api/config/v1/types_network.go | 177 + .../openshift/api/config/v1/types_oauth.go | 585 ++ .../api/config/v1/types_operatorhub.go | 85 + 
.../openshift/api/config/v1/types_project.go | 59 + .../openshift/api/config/v1/types_proxy.go | 99 + .../api/config/v1/types_scheduling.go | 105 + .../api/config/v1/types_tlssecurityprofile.go | 262 + .../api/config/v1/zz_generated.deepcopy.go | 4366 ++++++++++++++ .../v1/zz_generated.swagger_doc_generated.go | 1859 ++++++ .../openshift/custom-resource-status/LICENSE | 201 + .../conditions/v1/conditions.go | 104 + .../conditions/v1/doc.go | 9 + .../conditions/v1/types.go | 51 + .../conditions/v1/zz_generated.deepcopy.go | 23 + vendor/github.com/pborman/uuid/.travis.yml | 10 + .../github.com/pborman/uuid/CONTRIBUTING.md | 10 + vendor/github.com/pborman/uuid/CONTRIBUTORS | 1 + vendor/github.com/pborman/uuid/LICENSE | 27 + vendor/github.com/pborman/uuid/README.md | 15 + vendor/github.com/pborman/uuid/dce.go | 84 + vendor/github.com/pborman/uuid/doc.go | 13 + vendor/github.com/pborman/uuid/hash.go | 53 + vendor/github.com/pborman/uuid/marshal.go | 85 + vendor/github.com/pborman/uuid/node.go | 50 + vendor/github.com/pborman/uuid/sql.go | 68 + vendor/github.com/pborman/uuid/time.go | 57 + vendor/github.com/pborman/uuid/util.go | 32 + vendor/github.com/pborman/uuid/uuid.go | 162 + vendor/github.com/pborman/uuid/version1.go | 23 + vendor/github.com/pborman/uuid/version4.go | 26 + vendor/kubevirt.io/api/LICENSE | 202 + vendor/kubevirt.io/api/core/register.go | 4 + .../api/core/v1/componentconfig.go | 48 + .../api/core/v1/deepcopy_generated.go | 5304 +++++++++++++++++ vendor/kubevirt.io/api/core/v1/defaults.go | 240 + vendor/kubevirt.io/api/core/v1/doc.go | 8 + vendor/kubevirt.io/api/core/v1/register.go | 121 + vendor/kubevirt.io/api/core/v1/sanitizers.go | 45 + vendor/kubevirt.io/api/core/v1/schema.go | 1441 +++++ .../api/core/v1/schema_swagger_generated.go | 814 +++ vendor/kubevirt.io/api/core/v1/types.go | 2527 ++++++++ .../api/core/v1/types_swagger_generated.go | 848 +++ .../api/core/v1/zz_generated.defaults.go | 566 ++ .../containerized-data-importer-api/LICENSE | 202 + .../pkg/apis/core/register.go | 6 + .../pkg/apis/core/v1beta1/doc.go | 6 + .../pkg/apis/core/v1beta1/register.go | 54 + .../pkg/apis/core/v1beta1/types.go | 832 +++ .../core/v1beta1/types_swagger_generated.go | 408 ++ .../pkg/apis/core/v1beta1/types_transfer.go | 134 + .../pkg/apis/core/v1beta1/utils.go | 61 + .../core/v1beta1/zz_generated.deepcopy.go | 1560 +++++ .../api/LICENSE | 201 + .../api/types.go | 130 + vendor/modules.txt | 32 +- 120 files changed, 31625 insertions(+), 239 deletions(-) create mode 100644 vendor/github.com/emicklei/go-restful/custom_verb.go create mode 100644 vendor/github.com/go-openapi/swag/.gitattributes delete mode 100644 vendor/github.com/go-openapi/swag/.travis.yml create mode 100644 vendor/github.com/go-openapi/swag/file.go create mode 100644 vendor/github.com/openshift/api/LICENSE create mode 100644 vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml create mode 100644 
vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagecontentpolicy.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/doc.go create mode 100644 vendor/github.com/openshift/api/config/v1/register.go create mode 100644 vendor/github.com/openshift/api/config/v1/stringsource.go create mode 100644 vendor/github.com/openshift/api/config/v1/types.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_apiserver.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_authentication.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_build.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_cluster_operator.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_cluster_version.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_console.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_dns.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_feature.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_image.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_image_content_policy.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_infrastructure.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_ingress.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_network.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_oauth.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_operatorhub.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_project.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_proxy.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_scheduling.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go create mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go create mode 100644 vendor/github.com/openshift/custom-resource-status/LICENSE create mode 100644 vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go create mode 100644 vendor/github.com/openshift/custom-resource-status/conditions/v1/doc.go create mode 100644 
vendor/github.com/openshift/custom-resource-status/conditions/v1/types.go create mode 100644 vendor/github.com/openshift/custom-resource-status/conditions/v1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/pborman/uuid/.travis.yml create mode 100644 vendor/github.com/pborman/uuid/CONTRIBUTING.md create mode 100644 vendor/github.com/pborman/uuid/CONTRIBUTORS create mode 100644 vendor/github.com/pborman/uuid/LICENSE create mode 100644 vendor/github.com/pborman/uuid/README.md create mode 100644 vendor/github.com/pborman/uuid/dce.go create mode 100644 vendor/github.com/pborman/uuid/doc.go create mode 100644 vendor/github.com/pborman/uuid/hash.go create mode 100644 vendor/github.com/pborman/uuid/marshal.go create mode 100644 vendor/github.com/pborman/uuid/node.go create mode 100644 vendor/github.com/pborman/uuid/sql.go create mode 100644 vendor/github.com/pborman/uuid/time.go create mode 100644 vendor/github.com/pborman/uuid/util.go create mode 100644 vendor/github.com/pborman/uuid/uuid.go create mode 100644 vendor/github.com/pborman/uuid/version1.go create mode 100644 vendor/github.com/pborman/uuid/version4.go create mode 100644 vendor/kubevirt.io/api/LICENSE create mode 100644 vendor/kubevirt.io/api/core/register.go create mode 100644 vendor/kubevirt.io/api/core/v1/componentconfig.go create mode 100644 vendor/kubevirt.io/api/core/v1/deepcopy_generated.go create mode 100644 vendor/kubevirt.io/api/core/v1/defaults.go create mode 100644 vendor/kubevirt.io/api/core/v1/doc.go create mode 100644 vendor/kubevirt.io/api/core/v1/register.go create mode 100644 vendor/kubevirt.io/api/core/v1/sanitizers.go create mode 100644 vendor/kubevirt.io/api/core/v1/schema.go create mode 100644 vendor/kubevirt.io/api/core/v1/schema_swagger_generated.go create mode 100644 vendor/kubevirt.io/api/core/v1/types.go create mode 100644 vendor/kubevirt.io/api/core/v1/types_swagger_generated.go create mode 100644 vendor/kubevirt.io/api/core/v1/zz_generated.defaults.go create mode 100644 vendor/kubevirt.io/containerized-data-importer-api/LICENSE create mode 100644 vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/register.go create mode 100644 vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/doc.go create mode 100644 vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/register.go create mode 100644 vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types.go create mode 100644 vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_swagger_generated.go create mode 100644 vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_transfer.go create mode 100644 vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/utils.go create mode 100644 vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/kubevirt.io/controller-lifecycle-operator-sdk/api/LICENSE create mode 100644 vendor/kubevirt.io/controller-lifecycle-operator-sdk/api/types.go diff --git a/go.mod b/go.mod index 4d8ad4141..54a1ff9c6 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/instaclustr/operator go 1.18 require ( - github.com/go-logr/logr v1.2.0 + github.com/go-logr/logr v1.2.3 github.com/gorilla/mux v1.8.0 github.com/hashicorp/go-version v1.6.0 github.com/jackc/pgx/v5 v5.4.3 @@ -14,6 +14,8 @@ require ( k8s.io/apimachinery v0.24.2 k8s.io/client-go v0.24.2 k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 + kubevirt.io/api 
v0.59.0 + kubevirt.io/containerized-data-importer-api v1.56.0 sigs.k8s.io/controller-runtime v0.12.2 ) @@ -30,19 +32,19 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful v2.9.5+incompatible // indirect + github.com/emicklei/go-restful v2.15.0+incompatible // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/go-logr/zapr v1.2.0 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.5 // indirect - github.com/go-openapi/swag v0.19.14 // indirect + github.com/go-openapi/jsonreference v0.19.6 // indirect + github.com/go-openapi/swag v0.21.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect - github.com/google/go-cmp v0.5.5 // indirect + github.com/google/go-cmp v0.5.6 // indirect github.com/google/gofuzz v1.1.0 // indirect github.com/google/uuid v1.1.2 // indirect github.com/imdario/mergo v0.3.12 // indirect @@ -50,11 +52,14 @@ require ( github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/mailru/easyjson v0.7.6 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/openshift/api v0.0.0-20211217221424-8779abfbd571 // indirect + github.com/openshift/custom-resource-status v1.1.2 // indirect + github.com/pborman/uuid v1.2.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/client_golang v1.12.1 // indirect github.com/prometheus/client_model v0.2.0 // indirect @@ -81,6 +86,7 @@ require ( k8s.io/component-base v0.24.2 // indirect k8s.io/klog/v2 v2.60.1 // indirect k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect + kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 // indirect sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect sigs.k8s.io/yaml v1.3.0 // indirect diff --git a/go.sum b/go.sum index 5d2f3f759..a1b54e9ff 100644 --- a/go.sum +++ b/go.sum @@ -113,6 +113,11 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsr github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/dave/dst v0.26.2/go.mod h1:UMDJuIRPfyUCC78eFuB+SV/WI8oDeyFDvM/JR6NI3IU= +github.com/dave/gopackages v0.0.0-20170318123100-46e7023ec56e/go.mod h1:i00+b/gKdIDIxuLDFob7ustLAVqhsZRk2qVZrArELGQ= +github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= +github.com/dave/kerr v0.0.0-20170318121727-bc25dd6abe8e/go.mod h1:qZqlPyPvfsDJt+3wHJ1EvSXDuVjFTK0j2p/ca+gtsb8= 
+github.com/dave/rebecca v0.9.1/go.mod h1:N6XYdMD/OKw3lkF3ywh8Z6wPGuwNFDNtWYEMFWEmXBA= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -122,8 +127,9 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.15.0+incompatible h1:8KpYO/Xl/ZudZs5RNOEhWMBY4hmzlZhhRd9cu+jrZP4= +github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -158,19 +164,23 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs= +github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.21.1 
h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU= +github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -230,14 +240,16 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181127221834-b4f47329b966/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -251,10 +263,13 @@ github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= @@ -334,8 +349,9 @@ github.com/kr/text v0.2.0/go.mod 
h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -385,8 +401,15 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= +github.com/openshift/api v0.0.0-20211217221424-8779abfbd571 h1:+ShYlGoPriGahTTFTjQ0RtNXW0srxDodk2STdc238Rk= +github.com/openshift/api v0.0.0-20211217221424-8779abfbd571/go.mod h1:F/eU6jgr6Q2VhMu1mSpMmygxAELd7+BUxs3NHZ25jV4= +github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= +github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPfHnSOQoQf/sypqA6A4= +github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -434,6 +457,7 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -475,6 +499,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= @@ -521,6 +546,7 @@ go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -569,6 +595,7 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -613,11 +640,14 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= golang.org/x/net 
v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= @@ -648,6 +678,7 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -701,12 +732,14 @@ golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -776,6 +809,7 @@ golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -793,6 +827,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.0/go.mod 
h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -926,6 +962,7 @@ gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/src-d/go-billy.v4 v4.3.0/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= @@ -952,15 +989,21 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= +k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= k8s.io/api v0.24.2 h1:g518dPU/L7VRLxWfcadQn2OnsiGWVOadTLpdnqgY2OI= k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg= k8s.io/apiextensions-apiserver v0.24.2 h1:/4NEQHKlEz1MlaK/wHT5KMKC9UKYz6NZz6JE6ov4G6k= k8s.io/apiextensions-apiserver v0.24.2/go.mod h1:e5t2GMFVngUEHUd0wuCJzw8YDwZoqZfJiGOW6mm2hLQ= +k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc= +k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apimachinery v0.24.2 h1:5QlH9SL2C8KMcrNJPor+LbXVTaZRReml7svPEh4OKDM= k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= k8s.io/apiserver v0.24.2/go.mod h1:pSuKzr3zV+L+MWqsEo0kHHYwCo77AT5qXbFXP2jbvFI= k8s.io/client-go v0.24.2 h1:CoXFSf8if+bLEbinDqN9ePIDGzcLtqhfd6jpfnwGOFA= k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30= +k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE= +k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= k8s.io/component-base v0.24.2 h1:kwpQdoSfbcH+8MPN4tALtajLDfSfYxBDYlXobNWI6OU= k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM= @@ -968,22 +1011,36 @@ k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAE k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 
v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= +k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +kubevirt.io/api v0.59.0 h1:UDsJWklzd0x/w3EQjc48jafZc4p4vVxKUpmBhg2nVRk= +kubevirt.io/api v0.59.0/go.mod h1:zts/6mioR8vGgvYmQ17Cb9XsUR9e/WjJcdokmrE38wY= +kubevirt.io/containerized-data-importer-api v1.56.0 h1:Ehc6CbT3mG2uz+9s3t2N4HnpdK5GfQMt2DCCXCz2sDI= +kubevirt.io/containerized-data-importer-api v1.56.0/go.mod h1:92HiQEyzPoeMiCbgfG5Qe10JQVbtWMZOXucy56dKdGg= +kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 h1:QMrd0nKP0BGbnxTqakhDZAUhGKxPiPiN5gSDqKUmGGc= +kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90/go.mod h1:018lASpFYBsYN6XwmA2TIrPCx6e0gviTd/ZNtSitKgc= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw= sigs.k8s.io/controller-runtime v0.12.2 h1:nqV02cvhbAj7tbt21bpPpTByrXGn2INHRsi39lXy9sE= sigs.k8s.io/controller-runtime v0.12.2/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0= +sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/github.com/emicklei/go-restful/.gitignore b/vendor/github.com/emicklei/go-restful/.gitignore index cece7be66..446be09b4 100644 --- a/vendor/github.com/emicklei/go-restful/.gitignore +++ 
b/vendor/github.com/emicklei/go-restful/.gitignore @@ -68,3 +68,4 @@ examples/restful-html-template s.html restful-path-tail +.idea diff --git a/vendor/github.com/emicklei/go-restful/.travis.yml b/vendor/github.com/emicklei/go-restful/.travis.yml index b22f8f547..3a0bf5ff1 100644 --- a/vendor/github.com/emicklei/go-restful/.travis.yml +++ b/vendor/github.com/emicklei/go-restful/.travis.yml @@ -3,4 +3,11 @@ language: go go: - 1.x -script: go test -v \ No newline at end of file +before_install: + - go test -v + +script: + - go test -race -coverprofile=coverage.txt -covermode=atomic + +after_success: + - bash <(curl -s https://codecov.io/bash) \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/CHANGES.md b/vendor/github.com/emicklei/go-restful/CHANGES.md index e52529631..f7409d546 100644 --- a/vendor/github.com/emicklei/go-restful/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/CHANGES.md @@ -1,59 +1,100 @@ -## Change history of go-restful +# Change history of go-restful (v2 only) +## v2.15.0 - 2020-11-10 + +- Add OPTIONS in WebService + +## v2.14.3 - 2020-08-31 +- Fixed duplicate compression in dispatch. #449 + + +## v2.14.2 - 2020-08-31 + +- Added check on writer to prevent compression of response twice. #447 + +## v2.14.0 - 2020-08-19 + +- Enable content encoding on Handle and ServeHTTP (#446) +- List available representations in 406 body (#437) +- Convert to string using rune() (#443) + +## v2.13.0 - 2020-06-21 + +- 405 Method Not Allowed must have Allow header (#436) +- add field allowedMethodsWithoutContentType (#424) + +## v2.12.0 + +- support describing response headers (#426) +- fix openapi examples (#425) +- merge v3 fix (#422) + +## v2.11.1 + +- fix WriteError return value (#415) + +## v2.11.0 + +- allow prefix and suffix in path variable expression (#414) + +## v2.9.6 + +- support google custom verb (#413) + +## v2.9.5 -v2.9.5 - fix panic in Response.WriteError if err == nil -v2.9.4 +## v2.9.4 - fix issue #400, parsing mime type quality - Route Builder added option for contentEncodingEnabled (#398) -v2.9.3 +## v2.9.3 - Avoid return of 415 Unsupported Media Type when request body is empty (#396) -v2.9.2 +## v2.9.2 - Reduce allocations in per-request methods to improve performance (#395) -v2.9.1 +## v2.9.1 - Fix issue with default responses and invalid status code 0. (#393) -v2.9.0 +## v2.9.0 - add per Route content encoding setting (overrides container setting) -v2.8.0 +## v2.8.0 - add Request.QueryParameters() - add json-iterator (via build tag) - disable vgo module (until log is moved) -v2.7.1 +## v2.7.1 - add vgo module -v2.6.1 +## v2.6.1 - add JSONNewDecoderFunc to allow custom JSON Decoder usage (go 1.10+) -v2.6.0 +## v2.6.0 - Make JSR 311 routing and path param processing consistent - Adding description to RouteBuilder.Reads() - Update example for Swagger12 and OpenAPI -2017-09-13 +## 2017-09-13 - added route condition functions using `.If(func)` in route building. -2017-02-16 +## 2017-02-16 - solved issue #304, make operation names unique -2017-01-30 +## 2017-01-30 [IMPORTANT] For swagger users, change your import statement to: swagger "github.com/emicklei/go-restful-swagger12" @@ -61,60 +102,60 @@ v2.6.0 - moved swagger 1.2 code to go-restful-swagger12 - created TAG 2.0.0 -2017-01-27 +## 2017-01-27 - remove defer request body close - expose Dispatch for testing filters and RouteFunctions - swagger response model cannot be array - created TAG 1.0.0 -2016-12-22 +## 2016-12-22 - (API change) Remove code related to caching request content.
Removes SetCacheReadEntity(doCache bool) -2016-11-26 +## 2016-11-26 - Default change! now use CurlyRouter (was RouterJSR311) - Default change! no more caching of request content - Default change! do not recover from panics -2016-09-22 +## 2016-09-22 - fix the DefaultRequestContentType feature -2016-02-14 +## 2016-02-14 - take the quality factor of the Accept header mediatype into account when deciding the content type of the response - add constructors for custom entity accessors for xml and json -2015-09-27 +## 2015-09-27 - rename new WriteStatusAnd... to WriteHeaderAnd... for consistency -2015-09-25 +## 2015-09-25 - fixed problem with changing Header after WriteHeader (issue 235) -2015-09-14 +## 2015-09-14 - changed behavior of WriteHeader (immediate write) and WriteEntity (no status write) - added support for custom EntityReaderWriters. -2015-08-06 +## 2015-08-06 - add support for reading entities from compressed request content - use sync.Pool for compressors of http response and request body - add Description to Parameter for documentation in Swagger UI -2015-03-20 +## 2015-03-20 - add configurable logging -2015-03-18 +## 2015-03-18 - if not specified, the Operation is derived from the Route function -2015-03-17 +## 2015-03-17 - expose Parameter creation functions - make trace logger an interface @@ -123,26 +164,26 @@ v2.6.0 - JSR311 router now handles wildcards - add Notes to Route -2014-11-27 +## 2014-11-27 - (api add) PrettyPrint per response. (as proposed in #167) -2014-11-12 +## 2014-11-12 - (api add) ApiVersion(.) for documentation in Swagger UI -2014-11-10 +## 2014-11-10 - (api change) struct fields tagged with "description" show up in Swagger UI -2014-10-31 +## 2014-10-31 - (api change) ReturnsError -> Returns - (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder - fix swagger nested structs - sort Swagger response messages by code -2014-10-23 +## 2014-10-23 - (api add) ReturnsError allows you to document Http codes in swagger - fixed problem with greedy CurlyRouter @@ -156,73 +197,73 @@ v2.6.0 - (api add) added AllowedDomains in CORS - (api add) ParameterNamed for detailed documentation -2014-04-16 +## 2014-04-16 - (api add) expose constructor of Request for testing. -2014-06-27 +## 2014-06-27 - (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification). - (api add) SetCacheReadEntity allows control over whether or not the request body is being cached (default true for compatibility reasons). -2014-07-03 +## 2014-07-03 - (api add) CORS can be configured with a list of allowed domains -2014-03-12 +## 2014-03-12 - (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter) -2014-02-26 +## 2014-02-26 - (api add) Request now provides information about the matched Route, see method SelectedRoutePath -2014-02-17 +## 2014-02-17 - (api change) renamed parameter constants (go-lint checks) -2014-01-10 +## 2014-01-10 - (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier -2014-01-07 +## 2014-01-07 - (api change) Write* methods in Response now return the error or nil. - added example of serving HTML from a Go template. - fixed comparing Allowed headers in CORS (is now case-insensitive) -2013-11-13 +## 2013-11-13 - (api add) Response knows how many bytes are written to the response body. -2013-10-29 +## 2013-10-29 - (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace.
This may be a security issue as it exposes source code information. -2013-10-04 +## 2013-10-04 - (api add) Response knows what HTTP status has been written - (api add) Request can have attributes (map of string->interface, also called request-scoped variables) -2013-09-12 +## 2013-09-12 - (api change) Router interface simplified - Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths -2013-08-05 +## 2013-08-05 - add OPTIONS support - add CORS support -2013-08-27 +## 2013-08-27 - fixed some reported issues (see github) - (api change) deprecated use of WriteError; use WriteErrorString instead -2014-04-15 +## 2014-04-15 - (fix) v1.0.1 tag: fix Issue 111: WriteErrorString -2013-08-08 +## 2013-08-08 - (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer. - (api add) the swagger package has been extended to have a UI per container. @@ -235,38 +276,38 @@ Important API changes: - (api remove) package variable EnableContentEncoding no longer works; use restful.DefaultContainer.EnableContentEncoding(true) instead. -2013-07-06 +## 2013-07-06 - (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled by default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature. -2013-06-19 +## 2013-06-19 - (improve) DoNotRecover option, moved request body closer, improved ReadEntity -2013-06-03 +## 2013-06-03 - (api change) removed Dispatcher interface, hide PathExpression - changed receiver names of type functions to be more idiomatic Go -2013-06-02 +## 2013-06-02 - (optimize) Cache the RegExp compilation of Paths. -2013-05-22 +## 2013-05-22 - (api add) Added support for request/response filter functions -2013-05-18 +## 2013-05-18 - (api add) Added feature to change the default Http Request Dispatch function (travis cline) - (api change) Moved Swagger Webservice to swagger package (see example restful-user) -[2012-11-14 .. 2013-05-18> +## [2012-11-14 .. 2013-05-18> - See https://github.com/emicklei/go-restful/commits -2012-11-14 +## 2012-11-14 - Initial commit diff --git a/vendor/github.com/emicklei/go-restful/Makefile b/vendor/github.com/emicklei/go-restful/Makefile index b40081cc0..3a824ac3d 100644 --- a/vendor/github.com/emicklei/go-restful/Makefile +++ b/vendor/github.com/emicklei/go-restful/Makefile @@ -1,7 +1,5 @@ all: test test: - go test -v . - -ex: - cd examples && ls *.go | xargs go build -o /tmp/ignore \ No newline at end of file + go vet . + go test -cover -v .
\ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/README.md b/vendor/github.com/emicklei/go-restful/README.md index f52c25acf..e5878a668 100644 --- a/vendor/github.com/emicklei/go-restful/README.md +++ b/vendor/github.com/emicklei/go-restful/README.md @@ -4,9 +4,10 @@ package for building REST-style Web Services using Google Go [![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful) [![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful) -[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://godoc.org/github.com/emicklei/go-restful) +[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful) +[![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful) -- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples) +- [Code examples using v3](https://github.com/emicklei/go-restful/tree/master/examples) REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping: @@ -18,6 +19,28 @@ REST asks developers to use HTTP methods explicitly and in a way that's consiste - PATCH = Update partial content of a resource - OPTIONS = Get information about the communication options for the request URI +### Usage + +#### Using Go Modules + +As of version `v3.0.0` (on the v3 branch), this package supports Go modules. + +``` +import ( + restful "github.com/emicklei/go-restful/v3" +) +``` + +#### Without Go Modules + +All versions up to `v2.*.*` (on the master branch) do not support Go modules. + +``` +import ( + restful "github.com/emicklei/go-restful" +) +``` + ### Example ```Go @@ -38,14 +61,14 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo ... } ``` - -[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go) - + +[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/user-resource/restful-user-resource.go) + ### Features -- Routes for request → function mapping with path parameter (e.g. {id}) support +- Routes for request → function mapping with path parameter (e.g. {id} but also prefix_{var} and {var}_suffix) support - Configurable router: - - (default) Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*} + - (default) Fast routing algorithm that allows static elements, [google custom method](https://cloud.google.com/apis/design/custom_methods), regular expressions and dynamic parameters in the URL path (e.g. /resource/name:customVerb, /meetings/{id} or /static/{subpath:*}) - Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions - Request API for reading structs from JSON/XML and accessing parameters (path, query, header) - Response API for writing structs to JSON/XML and setting headers @@ -85,4 +108,4 @@ TODO: write examples of these. Type ```git shortlog -s``` for a full list of contributors.
-© 2012 - 2018, http://ernestmicklei.com. MIT License. Contributions are welcome. +© 2012 - 2020, http://ernestmicklei.com. MIT License. Contributions are welcome. diff --git a/vendor/github.com/emicklei/go-restful/container.go b/vendor/github.com/emicklei/go-restful/container.go index 061a8d718..afca312a4 100644 --- a/vendor/github.com/emicklei/go-restful/container.go +++ b/vendor/github.com/emicklei/go-restful/container.go @@ -185,6 +185,11 @@ func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) // when a ServiceError is returned during route selection. Default implementation // calls resp.WriteErrorString(err.Code, err.Message) func writeServiceError(err ServiceError, req *Request, resp *Response) { + for header, values := range err.Header { + for _, value := range values { + resp.Header().Add(header, value) + } + } resp.WriteErrorString(err.Code, err.Message) } @@ -201,6 +206,7 @@ func (c *Container) Dispatch(httpWriter http.ResponseWriter, httpRequest *http.R // Dispatch the incoming Http Request to a matching WebService. func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) { + // so we can assign a compressing one later writer := httpWriter // CompressingResponseWriter should be closed after all operations are done @@ -231,28 +237,8 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R c.webServices, httpRequest) }() - - // Detect if compression is needed - // assume without compression, test for override - contentEncodingEnabled := c.contentEncodingEnabled - if route != nil && route.contentEncodingEnabled != nil { - contentEncodingEnabled = *route.contentEncodingEnabled - } - if contentEncodingEnabled { - doCompress, encoding := wantsCompressedResponse(httpRequest) - if doCompress { - var err error - writer, err = NewCompressingResponseWriter(httpWriter, encoding) - if err != nil { - log.Print("unable to install compressor: ", err) - httpWriter.WriteHeader(http.StatusInternalServerError) - return - } - } - } - if err != nil { - // a non-200 response has already been written + // a non-200 response (may be compressed) has already been written // run container filters anyway ; they should not touch the response... 
chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) { switch err.(type) { @@ -265,6 +251,29 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer)) return } + + // Unless httpWriter is already an CompressingResponseWriter see if we need to install one + if _, isCompressing := httpWriter.(*CompressingResponseWriter); !isCompressing { + // Detect if compression is needed + // assume without compression, test for override + contentEncodingEnabled := c.contentEncodingEnabled + if route != nil && route.contentEncodingEnabled != nil { + contentEncodingEnabled = *route.contentEncodingEnabled + } + if contentEncodingEnabled { + doCompress, encoding := wantsCompressedResponse(httpRequest) + if doCompress { + var err error + writer, err = NewCompressingResponseWriter(httpWriter, encoding) + if err != nil { + log.Print("unable to install compressor: ", err) + httpWriter.WriteHeader(http.StatusInternalServerError) + return + } + } + } + } + pathProcessor, routerProcessesPath := c.router.(PathProcessor) if !routerProcessesPath { pathProcessor = defaultPathProcessor{} @@ -272,16 +281,13 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R pathParams := pathProcessor.ExtractParameters(route, webService, httpRequest.URL.Path) wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest, pathParams) // pass through filters (if any) - if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 { + if size := len(c.containerFilters) + len(webService.filters) + len(route.Filters); size > 0 { // compose filter chain - allFilters := []FilterFunction{} + allFilters := make([]FilterFunction, 0, size) allFilters = append(allFilters, c.containerFilters...) allFilters = append(allFilters, webService.filters...) allFilters = append(allFilters, route.Filters...) 
- chain := FilterChain{Filters: allFilters, Target: func(req *Request, resp *Response) { - // handle request by route after passing all filters - route.Function(wrappedRequest, wrappedResponse) - }} + chain := FilterChain{Filters: allFilters, Target: route.Function} chain.ProcessFilter(wrappedRequest, wrappedResponse) } else { // no filters, handle request by route @@ -299,13 +305,75 @@ func fixedPrefixPath(pathspec string) string { } // ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server -func (c *Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) { - c.ServeMux.ServeHTTP(httpwriter, httpRequest) +func (c *Container) ServeHTTP(httpWriter http.ResponseWriter, httpRequest *http.Request) { + // Skip, if content encoding is disabled + if !c.contentEncodingEnabled { + c.ServeMux.ServeHTTP(httpWriter, httpRequest) + return + } + // content encoding is enabled + + // Skip, if httpWriter is already an CompressingResponseWriter + if _, ok := httpWriter.(*CompressingResponseWriter); ok { + c.ServeMux.ServeHTTP(httpWriter, httpRequest) + return + } + + writer := httpWriter + // CompressingResponseWriter should be closed after all operations are done + defer func() { + if compressWriter, ok := writer.(*CompressingResponseWriter); ok { + compressWriter.Close() + } + }() + + doCompress, encoding := wantsCompressedResponse(httpRequest) + if doCompress { + var err error + writer, err = NewCompressingResponseWriter(httpWriter, encoding) + if err != nil { + log.Print("unable to install compressor: ", err) + httpWriter.WriteHeader(http.StatusInternalServerError) + return + } + } + + c.ServeMux.ServeHTTP(writer, httpRequest) } // Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics. func (c *Container) Handle(pattern string, handler http.Handler) { - c.ServeMux.Handle(pattern, handler) + c.ServeMux.Handle(pattern, http.HandlerFunc(func(httpWriter http.ResponseWriter, httpRequest *http.Request) { + // Skip, if httpWriter is already an CompressingResponseWriter + if _, ok := httpWriter.(*CompressingResponseWriter); ok { + handler.ServeHTTP(httpWriter, httpRequest) + return + } + + writer := httpWriter + + // CompressingResponseWriter should be closed after all operations are done + defer func() { + if compressWriter, ok := writer.(*CompressingResponseWriter); ok { + compressWriter.Close() + } + }() + + if c.contentEncodingEnabled { + doCompress, encoding := wantsCompressedResponse(httpRequest) + if doCompress { + var err error + writer, err = NewCompressingResponseWriter(httpWriter, encoding) + if err != nil { + log.Print("unable to install compressor: ", err) + httpWriter.WriteHeader(http.StatusInternalServerError) + return + } + } + } + + handler.ServeHTTP(writer, httpRequest) + })) } // HandleWithFilter registers the handler for the given pattern. 
@@ -319,7 +387,7 @@ func (c *Container) HandleWithFilter(pattern string, handler http.Handler) { } chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) { - handler.ServeHTTP(httpResponse, httpRequest) + handler.ServeHTTP(resp, req.Request) }} chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse)) } diff --git a/vendor/github.com/emicklei/go-restful/curly.go b/vendor/github.com/emicklei/go-restful/curly.go index 14d5b76bf..ba1fc5d5f 100644 --- a/vendor/github.com/emicklei/go-restful/curly.go +++ b/vendor/github.com/emicklei/go-restful/curly.go @@ -47,7 +47,7 @@ func (c CurlyRouter) SelectRoute( func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes { candidates := make(sortableCurlyRoutes, 0, 8) for _, each := range ws.routes { - matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens) + matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens, each.hasCustomVerb) if matches { candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers? } @@ -57,7 +57,7 @@ func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortab } // matchesRouteByPathTokens computes whether it matches, howmany parameters do match and what the number of static path elements are. -func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string) (matches bool, paramCount int, staticCount int) { +func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string, routeHasCustomVerb bool) (matches bool, paramCount int, staticCount int) { if len(routeTokens) < len(requestTokens) { // proceed in matching only if last routeToken is wildcard count := len(routeTokens) @@ -72,6 +72,15 @@ func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []strin return false, 0, 0 } requestToken := requestTokens[i] + if routeHasCustomVerb && hasCustomVerb(routeToken){ + if !isMatchCustomVerb(routeToken, requestToken) { + return false, 0, 0 + } + staticCount++ + requestToken = removeCustomVerb(requestToken) + routeToken = removeCustomVerb(routeToken) + } + if strings.HasPrefix(routeToken, "{") { paramCount++ if colon := strings.Index(routeToken, ":"); colon != -1 { diff --git a/vendor/github.com/emicklei/go-restful/custom_verb.go b/vendor/github.com/emicklei/go-restful/custom_verb.go new file mode 100644 index 000000000..bfc17efde --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/custom_verb.go @@ -0,0 +1,29 @@ +package restful + +import ( + "fmt" + "regexp" +) + +var ( + customVerbReg = regexp.MustCompile(":([A-Za-z]+)$") +) + +func hasCustomVerb(routeToken string) bool { + return customVerbReg.MatchString(routeToken) +} + +func isMatchCustomVerb(routeToken string, pathToken string) bool { + rs := customVerbReg.FindStringSubmatch(routeToken) + if len(rs) < 2 { + return false + } + + customVerb := rs[1] + specificVerbReg := regexp.MustCompile(fmt.Sprintf(":%s$", customVerb)) + return specificVerbReg.MatchString(pathToken) +} + +func removeCustomVerb(str string) string { + return customVerbReg.ReplaceAllString(str, "") +} diff --git a/vendor/github.com/emicklei/go-restful/jsr311.go b/vendor/github.com/emicklei/go-restful/jsr311.go index 3ede1891e..9cfd59a1c 100644 --- a/vendor/github.com/emicklei/go-restful/jsr311.go +++ b/vendor/github.com/emicklei/go-restful/jsr311.go @@ -9,6 +9,7 @@ import ( "fmt" "net/http" "sort" + "strings" ) // RouterJSR311 implements 
the flow for matching Requests to Routes (and consequently Resource Functions) @@ -98,7 +99,18 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R if trace { traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(previous), httpRequest.Method) } - return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed") + allowed := []string{} + allowedLoop: + for _, candidate := range previous { + for _, method := range allowed { + if method == candidate.Method { + continue allowedLoop + } + } + allowed = append(allowed, candidate.Method) + } + header := http.Header{"Allow": []string{strings.Join(allowed, ", ")}} + return nil, NewErrorWithHeader(http.StatusMethodNotAllowed, "405: Method Not Allowed", header) } // content-type @@ -135,7 +147,14 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R if trace { traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(previous), accept) } - return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable") + available := []string{} + for _, candidate := range previous { + available = append(available, candidate.Produces...) + } + return nil, NewError( + http.StatusNotAcceptable, + fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")), + ) } // return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil return candidates[0], nil diff --git a/vendor/github.com/emicklei/go-restful/path_processor.go b/vendor/github.com/emicklei/go-restful/path_processor.go index 357c723a7..141573245 100644 --- a/vendor/github.com/emicklei/go-restful/path_processor.go +++ b/vendor/github.com/emicklei/go-restful/path_processor.go @@ -29,7 +29,12 @@ func (d defaultPathProcessor) ExtractParameters(r *Route, _ *WebService, urlPath } else { value = urlParts[i] } - if strings.HasPrefix(key, "{") { // path-parameter + if r.hasCustomVerb && hasCustomVerb(key) { + key = removeCustomVerb(key) + value = removeCustomVerb(value) + } + + if strings.Index(key, "{") > -1 { // path-parameter if colon := strings.Index(key, ":"); colon != -1 { // extract by regex regPart := key[colon+1 : len(key)-1] @@ -42,7 +47,13 @@ func (d defaultPathProcessor) ExtractParameters(r *Route, _ *WebService, urlPath } } else { // without enclosing {} - pathParameters[key[1:len(key)-1]] = value + startIndex := strings.Index(key, "{") + endKeyIndex := strings.Index(key, "}") + + suffixLength := len(key) - endKeyIndex - 1 + endValueIndex := len(value) - suffixLength + + pathParameters[key[startIndex+1:endKeyIndex]] = value[startIndex:endValueIndex] } } } diff --git a/vendor/github.com/emicklei/go-restful/response.go b/vendor/github.com/emicklei/go-restful/response.go index fbb48f2da..e2f78f00f 100644 --- a/vendor/github.com/emicklei/go-restful/response.go +++ b/vendor/github.com/emicklei/go-restful/response.go @@ -174,15 +174,16 @@ func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType return writeJSON(r, status, contentType, value) } -// WriteError write the http status and the error string on the response. err can be nil. -func (r *Response) WriteError(httpStatus int, err error) error { +// WriteError writes the http status and the error string on the response. err can be nil. +// Return an error if writing was not succesful. 
+func (r *Response) WriteError(httpStatus int, err error) (writeErr error) { r.err = err if err == nil { - r.WriteErrorString(httpStatus, "") + writeErr = r.WriteErrorString(httpStatus, "") } else { - r.WriteErrorString(httpStatus, err.Error()) + writeErr = r.WriteErrorString(httpStatus, err.Error()) } - return err + return writeErr } // WriteServiceError is a convenience method for a responding with a status and a ServiceError diff --git a/vendor/github.com/emicklei/go-restful/route.go b/vendor/github.com/emicklei/go-restful/route.go index 6d15dbf66..598aa57a7 100644 --- a/vendor/github.com/emicklei/go-restful/route.go +++ b/vendor/github.com/emicklei/go-restful/route.go @@ -49,11 +49,20 @@ type Route struct { //Overrides the container.contentEncodingEnabled contentEncodingEnabled *bool + + // indicate route path has custom verb + hasCustomVerb bool + + // if a request does not include a content-type header then + // depending on the method, it may return a 415 Unsupported Media + // Must have uppercase HTTP Method names such as GET,HEAD,OPTIONS,... + allowedMethodsWithoutContentType []string } // Initialize for Route func (r *Route) postBuild() { r.pathParts = tokenizePath(r.Path) + r.hasCustomVerb = hasCustomVerb(r.Path) } // Create Request and Response from their http versions @@ -67,17 +76,6 @@ func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest return wrappedRequest, wrappedResponse } -// dispatchWithFilters call the function after passing through its own filters -func (r *Route) dispatchWithFilters(wrappedRequest *Request, wrappedResponse *Response) { - if len(r.Filters) > 0 { - chain := FilterChain{Filters: r.Filters, Target: r.Function} - chain.ProcessFilter(wrappedRequest, wrappedResponse) - } else { - // unfiltered - r.Function(wrappedRequest, wrappedResponse) - } -} - func stringTrimSpaceCutset(r rune) bool { return r == ' ' } @@ -121,8 +119,17 @@ func (r Route) matchesContentType(mimeTypes string) bool { if len(mimeTypes) == 0 { // idempotent methods with (most-likely or guaranteed) empty content match missing Content-Type m := r.Method - if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" { - return true + // if route specifies less or non-idempotent methods then use that + if len(r.allowedMethodsWithoutContentType) > 0 { + for _, each := range r.allowedMethodsWithoutContentType { + if m == each { + return true + } + } + } else { + if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" { + return true + } } // proceed with default mimeTypes = MIME_OCTET diff --git a/vendor/github.com/emicklei/go-restful/route_builder.go b/vendor/github.com/emicklei/go-restful/route_builder.go index 0fccf61e9..1d67a4c23 100644 --- a/vendor/github.com/emicklei/go-restful/route_builder.go +++ b/vendor/github.com/emicklei/go-restful/route_builder.go @@ -17,14 +17,15 @@ import ( // RouteBuilder is a helper to construct Routes. 
type RouteBuilder struct { - rootPath string - currentPath string - produces []string - consumes []string - httpMethod string // required - function RouteFunction // required - filters []FilterFunction - conditions []RouteSelectionConditionFunction + rootPath string + currentPath string + produces []string + consumes []string + httpMethod string // required + function RouteFunction // required + filters []FilterFunction + conditions []RouteSelectionConditionFunction + allowedMethodsWithoutContentType []string // see Route typeNameHandleFunc TypeNameHandleFunction // required @@ -176,6 +177,15 @@ func (b *RouteBuilder) Returns(code int, message string, model interface{}) *Rou return b } +// ReturnsWithHeaders is similar to Returns, but can specify response headers +func (b *RouteBuilder) ReturnsWithHeaders(code int, message string, model interface{}, headers map[string]Header) *RouteBuilder { + b.Returns(code, message, model) + err := b.errorMap[code] + err.Headers = headers + b.errorMap[code] = err + return b +} + // DefaultReturns is a special Returns call that sets the default of the response. func (b *RouteBuilder) DefaultReturns(message string, model interface{}) *RouteBuilder { b.defaultResponse = &ResponseError{ @@ -200,14 +210,41 @@ func (b *RouteBuilder) Deprecate() *RouteBuilder { return b } +// AllowedMethodsWithoutContentType overides the default list GET,HEAD,OPTIONS,DELETE,TRACE +// If a request does not include a content-type header then +// depending on the method, it may return a 415 Unsupported Media. +// Must have uppercase HTTP Method names such as GET,HEAD,OPTIONS,... +func (b *RouteBuilder) AllowedMethodsWithoutContentType(methods []string) *RouteBuilder { + b.allowedMethodsWithoutContentType = methods + return b +} + // ResponseError represents a response; not necessarily an error. 
type ResponseError struct { Code int Message string Model interface{} + Headers map[string]Header IsDefault bool } +// Header describes a header for a response of the API +// +// For more information: http://goo.gl/8us55a#headerObject +type Header struct { + *Items + Description string +} + +// Items describe swagger simple schemas for headers +type Items struct { + Type string + Format string + Items *Items + CollectionFormat string + Default interface{} +} + func (b *RouteBuilder) servicePath(path string) *RouteBuilder { b.rootPath = path return b @@ -276,26 +313,27 @@ func (b *RouteBuilder) Build() Route { operationName = nameOfFunction(b.function) } route := Route{ - Method: b.httpMethod, - Path: concatPath(b.rootPath, b.currentPath), - Produces: b.produces, - Consumes: b.consumes, - Function: b.function, - Filters: b.filters, - If: b.conditions, - relativePath: b.currentPath, - pathExpr: pathExpr, - Doc: b.doc, - Notes: b.notes, - Operation: operationName, - ParameterDocs: b.parameters, - ResponseErrors: b.errorMap, - DefaultResponse: b.defaultResponse, - ReadSample: b.readSample, - WriteSample: b.writeSample, - Metadata: b.metadata, - Deprecated: b.deprecated, - contentEncodingEnabled: b.contentEncodingEnabled, + Method: b.httpMethod, + Path: concatPath(b.rootPath, b.currentPath), + Produces: b.produces, + Consumes: b.consumes, + Function: b.function, + Filters: b.filters, + If: b.conditions, + relativePath: b.currentPath, + pathExpr: pathExpr, + Doc: b.doc, + Notes: b.notes, + Operation: operationName, + ParameterDocs: b.parameters, + ResponseErrors: b.errorMap, + DefaultResponse: b.defaultResponse, + ReadSample: b.readSample, + WriteSample: b.writeSample, + Metadata: b.metadata, + Deprecated: b.deprecated, + contentEncodingEnabled: b.contentEncodingEnabled, + allowedMethodsWithoutContentType: b.allowedMethodsWithoutContentType, } route.postBuild() return route diff --git a/vendor/github.com/emicklei/go-restful/service_error.go b/vendor/github.com/emicklei/go-restful/service_error.go index 62d1108bb..a41575469 100644 --- a/vendor/github.com/emicklei/go-restful/service_error.go +++ b/vendor/github.com/emicklei/go-restful/service_error.go @@ -4,12 +4,16 @@ package restful // Use of this source code is governed by a license // that can be found in the LICENSE file. -import "fmt" +import ( + "fmt" + "net/http" +) // ServiceError is a transport object to pass information about a non-Http error occurred in a WebService while processing a request. 
type ServiceError struct { Code int Message string + Header http.Header } // NewError returns a ServiceError using the code and reason @@ -17,6 +21,11 @@ func NewError(code int, message string) ServiceError { return ServiceError{Code: code, Message: message} } +// NewErrorWithHeader returns a ServiceError using the code, reason and header +func NewErrorWithHeader(code int, message string, header http.Header) ServiceError { + return ServiceError{Code: code, Message: message, Header: header} +} + // Error returns a text representation of the service error func (s ServiceError) Error() string { return fmt.Sprintf("[ServiceError:%v] %v", s.Code, s.Message) diff --git a/vendor/github.com/emicklei/go-restful/web_service.go b/vendor/github.com/emicklei/go-restful/web_service.go index 77ba9a8cf..2c164a2a2 100644 --- a/vendor/github.com/emicklei/go-restful/web_service.go +++ b/vendor/github.com/emicklei/go-restful/web_service.go @@ -188,7 +188,7 @@ func (w *WebService) RemoveRoute(path, method string) error { continue } newRoutes[current] = w.routes[ix] - current = current + 1 + current++ } w.routes = newRoutes return nil @@ -288,3 +288,8 @@ func (w *WebService) PATCH(subPath string) *RouteBuilder { func (w *WebService) DELETE(subPath string) *RouteBuilder { return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("DELETE").Path(subPath) } + +// OPTIONS is a shortcut for .Method("OPTIONS").Path(subPath) +func (w *WebService) OPTIONS(subPath string) *RouteBuilder { + return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("OPTIONS").Path(subPath) +} diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md index ad825f5f0..ab5931181 100644 --- a/vendor/github.com/go-logr/logr/README.md +++ b/vendor/github.com/go-logr/logr/README.md @@ -105,14 +105,18 @@ with higher verbosity means more (and less important) logs will be generated. 
There are implementations for the following logging libraries: - **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr) +- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr) - **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) - **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr) +- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting) - **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) - **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr) - **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) - **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend) - **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr) - **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr) +- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0) +- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing) ## FAQ diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index 44cd398c9..c3b56b3d2 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -43,7 +43,9 @@ limitations under the License. // // Info() and Error() are very similar, but they are separate methods so that // LogSink implementations can choose to do things like attach additional -// information (such as stack traces) on calls to Error(). +// information (such as stack traces) on calls to Error(). Error() messages are +// always logged, regardless of the current verbosity. If there is no error +// instance available, passing nil is valid. // // Verbosity // @@ -53,6 +55,7 @@ limitations under the License. // Log-lines with V-levels that are not enabled (as per the LogSink) will not // be written. Level V(0) is the default, and logger.V(0).Info() has the same // meaning as logger.Info(). Negative V-levels have the same meaning as V(0). +// Error messages do not have a verbosity level and are always logged. // // Where we might have written: // if flVerbose >= 2 { @@ -112,6 +115,15 @@ limitations under the License. // may be any Go value, but how the value is formatted is determined by the // LogSink implementation. // +// Logger instances are meant to be passed around by value. Code that receives +// such a value can call its methods without having to check whether the +// instance is ready for use. +// +// Calling methods with the null logger (Logger{}) as instance will crash +// because it has no LogSink. Therefore this null logger should never be passed +// around. For cases where passing a logger is optional, a pointer to Logger +// should be used. +// // Key Naming Conventions // // Keys are not strictly required to conform to any specification or regex, but @@ -253,11 +265,13 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) { // Error logs an error, with the given message and key/value pairs as context. 
// It functions similarly to Info, but may have unique behavior, and should be // preferred for logging errors (see the package documentations for more -// information). +// information). The log message will always be emitted, regardless of +// verbosity level. // // The msg argument should be used to add context to any underlying error, // while the err argument should be used to attach the actual error that -// triggered this log line, if present. +// triggered this log line, if present. The err parameter is optional +// and nil may be passed instead of an error instance. func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() diff --git a/vendor/github.com/go-openapi/swag/.gitattributes b/vendor/github.com/go-openapi/swag/.gitattributes new file mode 100644 index 000000000..49ad52766 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.gitattributes @@ -0,0 +1,2 @@ +# gofmt always uses LF, whereas Git uses CRLF on Windows. +*.go text eol=lf diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml index 813c47aa6..2a4a71f3a 100644 --- a/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -37,3 +37,14 @@ linters: - gci - gocognit - paralleltest + - thelper + - ifshort + - gomoddirectives + - cyclop + - forcetypeassert + - ireturn + - tagliatelle + - varnamelen + - goimports + - tenv + - golint diff --git a/vendor/github.com/go-openapi/swag/.travis.yml b/vendor/github.com/go-openapi/swag/.travis.yml deleted file mode 100644 index fc25a8872..000000000 --- a/vendor/github.com/go-openapi/swag/.travis.yml +++ /dev/null @@ -1,37 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.14.x -- 1.x -arch: -- amd64 -jobs: - include: - # include arch ppc, but only for latest go version - skip testing for race - - go: 1.x - arch: ppc64le - install: ~ - script: - - go test -v - - #- go: 1.x - # arch: arm - # install: ~ - # script: - # - go test -v - - # include linting job, but only for latest go version and amd64 arch - - go: 1.x - arch: amd64 - install: - go get github.com/golangci/golangci-lint/cmd/golangci-lint - script: - - golangci-lint run --new-from-rev master -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -language: go -notifications: - slack: - secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... 
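The go-logr documentation changes vendored above make two guarantees explicit: `Error()` output is always emitted regardless of the verbosity level, and a nil error is a valid argument. A minimal sketch of that contract using the `funcr` bridge mentioned in the README; the sink function and messages are illustrative assumptions:

```Go
package main

import (
	"errors"
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	// funcr bridges logr to a plain function; here each line is just printed.
	log := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{Verbosity: 0}) // only V(0) Info lines are enabled

	log.V(1).Info("suppressed", "reason", "verbosity 1 exceeds threshold 0")
	log.Error(errors.New("boom"), "emitted despite the verbosity threshold")
	log.Error(nil, "nil error is valid per the documented contract")
}
```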
diff --git a/vendor/github.com/go-openapi/swag/file.go b/vendor/github.com/go-openapi/swag/file.go new file mode 100644 index 000000000..16accc55f --- /dev/null +++ b/vendor/github.com/go-openapi/swag/file.go @@ -0,0 +1,33 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import "mime/multipart" + +// File represents an uploaded file. +type File struct { + Data multipart.File + Header *multipart.FileHeader +} + +// Read bytes from the file +func (f *File) Read(p []byte) (n int, err error) { + return f.Data.Read(p) +} + +// Close the file +func (f *File) Close() error { + return f.Data.Close() +} diff --git a/vendor/github.com/go-openapi/swag/post_go18.go b/vendor/github.com/go-openapi/swag/post_go18.go index c2e686d31..f5228b82c 100644 --- a/vendor/github.com/go-openapi/swag/post_go18.go +++ b/vendor/github.com/go-openapi/swag/post_go18.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.8 // +build go1.8 package swag diff --git a/vendor/github.com/go-openapi/swag/post_go19.go b/vendor/github.com/go-openapi/swag/post_go19.go index eb2f2d8bc..7c7da9c08 100644 --- a/vendor/github.com/go-openapi/swag/post_go19.go +++ b/vendor/github.com/go-openapi/swag/post_go19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.9 // +build go1.9 package swag diff --git a/vendor/github.com/go-openapi/swag/pre_go18.go b/vendor/github.com/go-openapi/swag/pre_go18.go index 6607f3393..2757d9b95 100644 --- a/vendor/github.com/go-openapi/swag/pre_go18.go +++ b/vendor/github.com/go-openapi/swag/pre_go18.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.8 // +build !go1.8 package swag diff --git a/vendor/github.com/go-openapi/swag/pre_go19.go b/vendor/github.com/go-openapi/swag/pre_go19.go index 4bae187d1..0565db377 100644 --- a/vendor/github.com/go-openapi/swag/pre_go19.go +++ b/vendor/github.com/go-openapi/swag/pre_go19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.9 // +build !go1.9 package swag diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index 3d45c1a47..f01eff318 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -315,7 +315,7 @@ func (tf Transform) Option() Option { return tf.trans } // pops the address from the stack. Thus, when traversing into a pointer from // reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles // by checking whether the pointer has already been visited. The cycle detection -// uses a seperate stack for the x and y values. +// uses a separate stack for the x and y values. 
+// uses a separate stack for the x and y values.
// // If a cycle is detected we need to determine whether the two pointers // should be considered equal. The definition of equality chosen by Equal diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index 168f92f3c..2ad3bc85b 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -7,6 +7,7 @@ package cmp import ( "bytes" "fmt" + "math" "reflect" "strconv" "strings" @@ -96,15 +97,16 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { } // Auto-detect the type of the data. - var isLinedText, isText, isBinary bool var sx, sy string + var ssx, ssy []string + var isString, isMostlyText, isPureLinedText, isBinary bool switch { case t.Kind() == reflect.String: sx, sy = vx.String(), vy.String() - isText = true // Initial estimate, verify later + isString = true case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)): sx, sy = string(vx.Bytes()), string(vy.Bytes()) - isBinary = true // Initial estimate, verify later + isString = true case t.Kind() == reflect.Array: // Arrays need to be addressable for slice operations to work. vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem() @@ -112,13 +114,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { vy2.Set(vy) vx, vy = vx2, vy2 } - if isText || isBinary { - var numLines, lastLineIdx, maxLineLen int - isBinary = !utf8.ValidString(sx) || !utf8.ValidString(sy) + if isString { + var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int for i, r := range sx + sy { - if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError { - isBinary = true - break + numTotalRunes++ + if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError { + numValidRunes++ } if r == '\n' { if maxLineLen < i-lastLineIdx { @@ -128,8 +129,26 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { numLines++ } } - isText = !isBinary - isLinedText = isText && numLines >= 4 && maxLineLen <= 1024 + isPureText := numValidRunes == numTotalRunes + isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes)) + isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024 + isBinary = !isMostlyText + + // Avoid diffing by lines if it produces a significantly more complex + // edit script than diffing by bytes. + if isPureLinedText { + ssx = strings.Split(sx, "\n") + ssy = strings.Split(sy, "\n") + esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result { + return diff.BoolResult(ssx[ix] == ssy[iy]) + }) + esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result { + return diff.BoolResult(sx[ix] == sy[iy]) + }) + efficiencyLines := float64(esLines.Dist()) / float64(len(esLines)) + efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes)) + isPureLinedText = efficiencyLines < 4*efficiencyBytes + } } // Format the string into printable records. @@ -138,9 +157,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { switch { // If the text appears to be multi-lined text, // then perform differencing across individual lines. 
- case isLinedText: - ssx := strings.Split(sx, "\n") - ssy := strings.Split(sy, "\n") + case isPureLinedText: list = opts.formatDiffSlice( reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line", func(v reflect.Value, d diffMode) textRecord { @@ -229,7 +246,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { // If the text appears to be single-lined text, // then perform differencing in approximately fixed-sized chunks. // The output is printed as quoted strings. - case isText: + case isMostlyText: list = opts.formatDiffSlice( reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte", func(v reflect.Value, d diffMode) textRecord { @@ -237,7 +254,6 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { return textRecord{Diff: d, Value: textLine(s)} }, ) - delim = "" // If the text appears to be binary data, // then perform differencing in approximately fixed-sized chunks. @@ -299,7 +315,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { // Wrap the output with appropriate type information. var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"} - if !isText { + if !isMostlyText { // The "{...}" byte-sequence literal is not valid Go syntax for strings. // Emit the type for extra clarity (e.g. "string{...}"). if t.Kind() == reflect.String { @@ -338,8 +354,11 @@ func (opts formatOptions) formatDiffSlice( vx, vy reflect.Value, chunkSize int, name string, makeRec func(reflect.Value, diffMode) textRecord, ) (list textList) { - es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result { - return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface()) + eq := func(ix, iy int) bool { + return vx.Index(ix).Interface() == vy.Index(iy).Interface() + } + es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result { + return diff.BoolResult(eq(ix, iy)) }) appendChunks := func(v reflect.Value, d diffMode) int { @@ -364,6 +383,7 @@ func (opts formatOptions) formatDiffSlice( groups := coalesceAdjacentEdits(name, es) groups = coalesceInterveningIdentical(groups, chunkSize/4) + groups = cleanupSurroundingIdentical(groups, eq) maxGroup := diffStats{Name: name} for i, ds := range groups { if maxLen >= 0 && numDiffs >= maxLen { @@ -416,25 +436,36 @@ func (opts formatOptions) formatDiffSlice( // coalesceAdjacentEdits coalesces the list of edits into groups of adjacent // equal or unequal counts. 
+// +// Example: +// +// Input: "..XXY...Y" +// Output: [ +// {NumIdentical: 2}, +// {NumRemoved: 2, NumInserted 1}, +// {NumIdentical: 3}, +// {NumInserted: 1}, +// ] +// func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { - var prevCase int // Arbitrary index into which case last occurred - lastStats := func(i int) *diffStats { - if prevCase != i { + var prevMode byte + lastStats := func(mode byte) *diffStats { + if prevMode != mode { groups = append(groups, diffStats{Name: name}) - prevCase = i + prevMode = mode } return &groups[len(groups)-1] } for _, e := range es { switch e { case diff.Identity: - lastStats(1).NumIdentical++ + lastStats('=').NumIdentical++ case diff.UniqueX: - lastStats(2).NumRemoved++ + lastStats('!').NumRemoved++ case diff.UniqueY: - lastStats(2).NumInserted++ + lastStats('!').NumInserted++ case diff.Modified: - lastStats(2).NumModified++ + lastStats('!').NumModified++ } } return groups @@ -444,6 +475,35 @@ func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) // equal groups into adjacent unequal groups that currently result in a // dual inserted/removed printout. This acts as a high-pass filter to smooth // out high-frequency changes within the windowSize. +// +// Example: +// +// WindowSize: 16, +// Input: [ +// {NumIdentical: 61}, // group 0 +// {NumRemoved: 3, NumInserted: 1}, // group 1 +// {NumIdentical: 6}, // ├── coalesce +// {NumInserted: 2}, // ├── coalesce +// {NumIdentical: 1}, // ├── coalesce +// {NumRemoved: 9}, // └── coalesce +// {NumIdentical: 64}, // group 2 +// {NumRemoved: 3, NumInserted: 1}, // group 3 +// {NumIdentical: 6}, // ├── coalesce +// {NumInserted: 2}, // ├── coalesce +// {NumIdentical: 1}, // ├── coalesce +// {NumRemoved: 7}, // ├── coalesce +// {NumIdentical: 1}, // ├── coalesce +// {NumRemoved: 2}, // └── coalesce +// {NumIdentical: 63}, // group 4 +// ] +// Output: [ +// {NumIdentical: 61}, +// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, +// {NumIdentical: 64}, +// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3}, +// {NumIdentical: 63}, +// ] +// func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { groups, groupsOrig := groups[:0], groups for i, ds := range groupsOrig { @@ -463,3 +523,91 @@ func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStat } return groups } + +// cleanupSurroundingIdentical scans through all unequal groups, and +// moves any leading sequence of equal elements to the preceding equal group and +// moves and trailing sequence of equal elements to the succeeding equal group. +// +// This is necessary since coalesceInterveningIdentical may coalesce edit groups +// together such that leading/trailing spans of equal elements becomes possible. +// Note that this can occur even with an optimal diffing algorithm. +// +// Example: +// +// Input: [ +// {NumIdentical: 61}, +// {NumIdentical: 1 , NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements +// {NumIdentical: 67}, +// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements +// {NumIdentical: 54}, +// ] +// Output: [ +// {NumIdentical: 64}, // incremented by 3 +// {NumRemoved: 9}, +// {NumIdentical: 67}, +// {NumRemoved: 9}, +// {NumIdentical: 64}, // incremented by 10 +// ] +// +func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats { + var ix, iy int // indexes into sequence x and y + for i, ds := range groups { + // Handle equal group. 
+ if ds.NumDiff() == 0 { + ix += ds.NumIdentical + iy += ds.NumIdentical + continue + } + + // Handle unequal group. + nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified + ny := ds.NumIdentical + ds.NumInserted + ds.NumModified + var numLeadingIdentical, numTrailingIdentical int + for i := 0; i < nx && i < ny && eq(ix+i, iy+i); i++ { + numLeadingIdentical++ + } + for i := 0; i < nx && i < ny && eq(ix+nx-1-i, iy+ny-1-i); i++ { + numTrailingIdentical++ + } + if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 { + if numLeadingIdentical > 0 { + // Remove leading identical span from this group and + // insert it into the preceding group. + if i-1 >= 0 { + groups[i-1].NumIdentical += numLeadingIdentical + } else { + // No preceding group exists, so prepend a new group, + // but do so after we finish iterating over all groups. + defer func() { + groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...) + }() + } + // Increment indexes since the preceding group would have handled this. + ix += numLeadingIdentical + iy += numLeadingIdentical + } + if numTrailingIdentical > 0 { + // Remove trailing identical span from this group and + // insert it into the succeeding group. + if i+1 < len(groups) { + groups[i+1].NumIdentical += numTrailingIdentical + } else { + // No succeeding group exists, so append a new group, + // but do so after we finish iterating over all groups. + defer func() { + groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical}) + }() + } + // Do not increment indexes since the succeeding group will handle this. + } + + // Update this group since some identical elements were removed. + nx -= numIdentical + ny -= numIdentical + groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny} + } + ix += nx + iy += ny + } + return groups +} diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go index a42e9d65a..b5f5e2613 100644 --- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go +++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go @@ -401,6 +401,7 @@ func (r *Lexer) scanToken() { // consume resets the current token to allow scanning the next one. 
func (r *Lexer) consume() { r.token.kind = tokenUndef + r.token.byteValueCloned = false r.token.delimValue = 0 } @@ -528,6 +529,7 @@ func (r *Lexer) Skip() { func (r *Lexer) SkipRecursive() { r.scanToken() var start, end byte + startPos := r.start switch r.token.delimValue { case '{': @@ -553,6 +555,14 @@ func (r *Lexer) SkipRecursive() { level-- if level == 0 { r.pos += i + 1 + if !json.Valid(r.Data[startPos:r.pos]) { + r.pos = len(r.Data) + r.fatalError = &LexerError{ + Reason: "skipped array/object json value is invalid", + Offset: r.pos, + Data: string(r.Data[r.pos:]), + } + } return } case c == '\\' && inQuotes: @@ -702,6 +712,10 @@ func (r *Lexer) Bytes() []byte { r.errInvalidToken("string") return nil } + if err := r.unescapeStringToken(); err != nil { + r.errInvalidToken("string") + return nil + } ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue))) n, err := base64.StdEncoding.Decode(ret, r.token.byteValue) if err != nil { diff --git a/vendor/github.com/openshift/api/LICENSE b/vendor/github.com/openshift/api/LICENSE new file mode 100644 index 000000000..5c389317e --- /dev/null +++ b/vendor/github.com/openshift/api/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2020 Red Hat, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml new file mode 100644 index 000000000..f2e2cc365 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusteroperator.crd.yaml @@ -0,0 +1,137 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/497 + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: clusteroperators.config.openshift.io +spec: + group: config.openshift.io + names: + kind: ClusterOperator + listKind: ClusterOperatorList + plural: clusteroperators + shortNames: + - co + singular: clusteroperator + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The version the operator is at. + jsonPath: .status.versions[?(@.name=="operator")].version + name: Version + type: string + - description: Whether the operator is running and stable. + jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - description: Whether the operator is processing changes. + jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - description: Whether the operator is degraded. 
+ jsonPath: .status.conditions[?(@.type=="Degraded")].status + name: Degraded + type: string + - description: The time the operator's Available status last changed. + jsonPath: .status.conditions[?(@.type=="Available")].lastTransitionTime + name: Since + type: date + name: v1 + schema: + openAPIV3Schema: + description: "ClusterOperator is the Custom Resource object which holds the current state of an operator. This object is used by operators to convey their state to the rest of the cluster. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds configuration that could apply to any operator. + type: object + status: + description: status holds the information about the state of an operator. It is consistent with status information across the Kubernetes ecosystem. + type: object + properties: + conditions: + description: conditions describes the state of the operator's managed and monitored components. + type: array + items: + description: ClusterOperatorStatusCondition represents the state of the operator's managed and monitored components. + type: object + required: + - lastTransitionTime + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the time of the last update to the current status property. + type: string + format: date-time + message: + description: message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines. + type: string + reason: + description: reason is the CamelCase reason for the condition's current status. + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: type specifies the aspect reported by this condition. + type: string + extension: + description: extension contains any additional status information specific to the operator which owns this status object. + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + relatedObjects: + description: 'relatedObjects is a list of objects that are "interesting" or related to this operator. Common uses are: 1. the detailed resource driving the operator 2. operator namespaces 3. operand namespaces' + type: array + items: + description: ObjectReference contains enough information to let you inspect or modify the referred object. + type: object + required: + - group + - name + - resource + properties: + group: + description: group of the referent. + type: string + name: + description: name of the referent. + type: string + namespace: + description: namespace of the referent. 
+ type: string + resource: + description: resource of the referent. + type: string + versions: + description: versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple operand entries in the array. Available operators must report the version of the operator itself with the name "operator". An operator reports a new "operator" version when it has rolled out the new version to all of its operands. + type: array + items: + type: object + required: + - name + - version + properties: + name: + description: name is the name of the particular operand this version is for. It usually matches container images, not operators. + type: string + version: + description: version indicates which version of a particular operand is currently being managed. It must always match the Available operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to roll out 1.1.0 + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml new file mode 100644 index 000000000..477435fd6 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml @@ -0,0 +1,359 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/495 + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: clusterversions.config.openshift.io +spec: + group: config.openshift.io + names: + kind: ClusterVersion + plural: clusterversions + singular: clusterversion + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.history[?(@.state=="Completed")].version + name: Version + type: string + - jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime + name: Since + type: date + - jsonPath: .status.conditions[?(@.type=="Progressing")].message + name: Status + type: string + name: v1 + schema: + openAPIV3Schema: + description: "ClusterVersion is the configuration for the ClusterVersionOperator. This is where parameters related to automatic updates can be set. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the desired state of the cluster version - the operator will work to ensure that the desired version is applied to the cluster. + type: object + required: + - clusterID + properties: + channel: + description: channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will contain stable updates that are appropriate for production clusters. + type: string + clusterID: + description: clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field. + type: string + desiredUpdate: + description: "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail. You may specify the version field without setting image if an update exists with that version in the availableUpdates or history. \n If an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed." + type: object + properties: + force: + description: force allows an administrator to update to an image that has failed verification or upgradeable checks. This option should only be used when the authenticity of the provided image has been verified out of band because the provided image will run with full administrative access to the cluster. Do not use this flag with images that come from unknown or potentially malicious sources. + type: boolean + image: + description: image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. + type: string + version: + description: version is a semantic versioning identifying the update version. When this field is part of spec, version is optional if image is specified. + type: string + overrides: + description: overrides is a list of overrides for components that are managed by the cluster version operator. Marking a component unmanaged will prevent the operator from creating or updating the object. + type: array + items: + description: ComponentOverride allows overriding cluster version operator's behavior for a component. + type: object + required: + - group + - kind + - name + - namespace + - unmanaged + properties: + group: + description: group identifies the API group that the kind is in. + type: string + kind: + description: kind identifies which object to override. + type: string + name: + description: name is the component's name. + type: string + namespace: + description: namespace is the component's namespace. If the resource is cluster scoped, the namespace should be empty. + type: string + unmanaged: + description: 'unmanaged controls if cluster version operator should stop managing the resources in this cluster. Default: false' + type: boolean + upstream: + description: upstream may be used to specify the preferred update server.
By default it will use the appropriate update server for the cluster and region. + type: string + status: + description: status contains information about the available updates and any in-progress updates. + type: object + required: + - availableUpdates + - desired + - observedGeneration + - versionHash + properties: + availableUpdates: + description: availableUpdates contains updates recommended for this cluster. Updates which appear in conditionalUpdates but not in availableUpdates may expose this cluster to known issues. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified. + type: array + items: + description: Release represents an OpenShift release image and associated metadata. + type: object + properties: + channels: + description: channels is the set of Cincinnati channels to which the release currently belongs. + type: array + items: + type: string + image: + description: image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. + type: string + url: + description: url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases. + type: string + version: + description: version is a semantic versioning identifying the update version. When this field is part of spec, version is optional if image is specified. + type: string + nullable: true + conditionalUpdates: + description: conditionalUpdates contains the list of updates that may be recommended for this cluster if it meets specific required conditions. Consumers interested in the set of updates that are actually recommended for this cluster should use availableUpdates. This list may be empty if no updates are recommended, if the update service is unavailable, or if an empty or invalid channel has been specified. + type: array + items: + description: ConditionalUpdate represents an update which is recommended to some clusters on the version the current cluster is reconciling, but which may not be recommended for the current cluster. + type: object + required: + - release + - risks + properties: + conditions: + description: 'conditions represents the observations of the conditional update''s current status. Known types are: * Evaluating, for whether the cluster-version operator will attempt to evaluate any risks[].matchingRules. * Recommended, for whether the update is recommended for the current cluster.' + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. 
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + release: + description: release is the target of the update. + type: object + properties: + channels: + description: channels is the set of Cincinnati channels to which the release currently belongs. + type: array + items: + type: string + image: + description: image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. + type: string + url: + description: url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases. + type: string + version: + description: version is a semantic versioning identifying the update version. When this field is part of spec, version is optional if image is specified. + type: string + risks: + description: risks represents the range of issues associated with updating to the target release. 
The cluster-version operator will evaluate all entries, and only recommend the update if there is at least one entry and all entries recommend the update. + type: array + minItems: 1 + items: + description: ConditionalUpdateRisk represents a reason and cluster-state for not recommending a conditional update. + type: object + required: + - matchingRules + - message + - name + - url + properties: + matchingRules: + description: matchingRules is a slice of conditions for deciding which clusters match the risk and which do not. The slice is ordered by decreasing precedence. The cluster-version operator will walk the slice in order, and stop after the first one it can successfully evaluate. If no condition can be successfully evaluated, the update will not be recommended. + type: array + minItems: 1 + items: + description: ClusterCondition is a union of typed cluster conditions. The 'type' property determines which of the type-specific properties are relevant. When evaluated on a cluster, the condition may match, not match, or fail to evaluate. + type: object + required: + - type + properties: + promql: + description: promQL represents a cluster condition based on PromQL. + type: object + required: + - promql + properties: + promql: + description: PromQL is a PromQL query classifying clusters. This query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are evaluation failures. + type: string + type: + description: type represents the cluster-condition type. This defines the members and semantics of any additional properties. + type: string + enum: + - Always + - PromQL + x-kubernetes-list-type: atomic + message: + description: message provides additional information about the risk of updating, in the event that matchingRules match the cluster state. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines. + type: string + minLength: 1 + name: + description: name is the CamelCase reason for not recommending a conditional update, in the event that matchingRules match the cluster state. + type: string + minLength: 1 + url: + description: url contains information about this risk. + type: string + format: uri + minLength: 1 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + x-kubernetes-list-type: atomic + conditions: + description: conditions provides information about the cluster version. The condition "Available" is set to true if the desiredUpdate has been reached. The condition "Progressing" is set to true if an update is being applied. The condition "Degraded" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation. + type: array + items: + description: ClusterOperatorStatusCondition represents the state of the operator's managed and monitored components. + type: object + required: + - lastTransitionTime + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the time of the last update to the current status property. + type: string + format: date-time + message: + description: message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines.
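To make the conditionalUpdates shape described above concrete, here is a sketch of a single status.conditionalUpdates entry. The metric, versions, image reference, and URL are hypothetical; the PromQL follows the documented convention of returning 1 in the match case and 0 otherwise:

conditionalUpdates:
- release:
    version: "1.1.0"                                  # hypothetical target version
    image: registry.example.com/release:1.1.0          # hypothetical release image
  risks:
  - name: ExampleRisk                                  # CamelCase reason, per the schema
    message: Clusters matching this rule may hit a known issue after updating.
    url: https://example.com/issues/1                  # hypothetical knowledge-base link
    matchingRules:
    - type: PromQL
      promql:
        # hypothetical metric; returns 1 if any matching series exists, else 0
        promql: group(example_metric{flag="true"}) or 0 * group(example_metric)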
+ type: string + reason: + description: reason is the CamelCase reason for the condition's current status. + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: type specifies the aspect reported by this condition. + type: string + desired: + description: desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized, desired will be set with the information available, which may be an image or a tag. + type: object + properties: + channels: + description: channels is the set of Cincinnati channels to which the release currently belongs. + type: array + items: + type: string + image: + description: image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. + type: string + url: + description: url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases. + type: string + version: + description: version is a semantic versioning identifying the update version. When this field is part of spec, version is optional if image is specified. + type: string + history: + description: history contains a list of the most recent versions applied to the cluster. This value may be empty during cluster startup, and then will be updated when a new update is being applied. The newest update is first in the list and it is ordered by recency. Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved. + type: array + items: + description: UpdateHistory is a single attempted update to the cluster. + type: object + required: + - completionTime + - image + - startedTime + - state + - verified + properties: + acceptedRisks: + description: acceptedRisks records risks which were accepted to initiate the update. For example, it may mention an Upgradeable=False or missing signature that was overridden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets. + type: string + completionTime: + description: completionTime, if set, is when the update was fully applied. The update that is currently being applied will have a null completion time. Completion time will always be set for entries that are not the current update (usually to the started time of the next update). + type: string + format: date-time + nullable: true + image: + description: image is a container image location that contains the update. This value is always populated. + type: string + startedTime: + description: startedTime is the time at which the update was started. + type: string + format: date-time + state: + description: state reflects whether the update was fully applied. The Partial state indicates the update is not fully applied, while the Completed state indicates the update was successfully rolled out at least once (all parts of the update successfully applied). + type: string + verified: + description: verified indicates whether the provided update was properly verified before it was installed. If this is false the cluster may not be trusted.
Verified does not cover upgradeable checks that depend on the cluster state at the time when the update target was accepted. + type: boolean + version: + description: version is a semantic versioning identifying the update version. If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty. + type: string + observedGeneration: + description: observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent a previous version. + type: integer + format: int64 + versionHash: + description: versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only. + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml new file mode 100644 index 000000000..4ba6c01cf --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml @@ -0,0 +1,83 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: operatorhubs.config.openshift.io +spec: + group: config.openshift.io + names: + kind: OperatorHub + listKind: OperatorHubList + plural: operatorhubs + singular: operatorhub + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "OperatorHub is the Schema for the operatorhubs API. It can be used to change the state of the default hub sources for OperatorHub on the cluster from enabled to disabled and vice versa. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OperatorHubSpec defines the desired state of OperatorHub + type: object + properties: + disableAllDefaultSources: + description: disableAllDefaultSources allows you to disable all the default hub sources. If this is true, a specific entry in sources can be used to enable a default source. If this is false, a specific entry in sources can be used to disable or enable a default source. + type: boolean + sources: + description: sources is the list of default hub sources and their configuration. 
If the list is empty, it implies that the default hub sources are enabled on the cluster unless disableAllDefaultSources is true. If disableAllDefaultSources is true and sources is not empty, the configuration present in sources will take precedence. The list of default hub sources and their current state will always be reflected in the status block. + type: array + items: + description: HubSource is used to specify the hub source and its configuration + type: object + properties: + disabled: + description: disabled is used to disable a default hub source on cluster + type: boolean + name: + description: name is the name of one of the default hub sources + type: string + maxLength: 253 + minLength: 1 + status: + description: OperatorHubStatus defines the observed state of OperatorHub. The current state of the default hub sources will always be reflected here. + type: object + properties: + sources: + description: sources encapsulates the result of applying the configuration for each hub source + type: array + items: + description: HubSourceStatus is used to reflect the current state of applying the configuration to a default source + type: object + properties: + disabled: + description: disabled is used to disable a default hub source on cluster + type: boolean + message: + description: message provides more information regarding failures + type: string + name: + description: name is the name of one of the default hub sources + type: string + maxLength: 253 + minLength: 1 + status: + description: status indicates success or failure in applying the configuration + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml new file mode 100644 index 000000000..246225397 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml @@ -0,0 +1,78 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: proxies.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Proxy + listKind: ProxyList + plural: proxies + singular: proxy + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster` \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec holds user-settable values for the proxy configuration + type: object + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var. + type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var. + type: string + noProxy: + description: noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. Empty means unset and will not result in an env var. + type: string + readinessEndpoints: + description: readinessEndpoints is a list of endpoints used to verify readiness of the proxy. + type: array + items: + type: string + trustedCA: + description: "trustedCA is a reference to a ConfigMap containing a CA certificate bundle. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from the required key \"ca-bundle.crt\", merging it with the system default trust bundle, and writing the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. Clients that expect to make proxy connections must use the trusted-ca-bundle for all HTTPS requests to the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as well. \n The namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml): \n apiVersion: v1 kind: ConfigMap metadata: name: user-ca-bundle namespace: openshift-config data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom CA certificate bundle. -----END CERTIFICATE-----" + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. + type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS requests. + type: string + noProxy: + description: noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. 
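Putting the spec fields above together, a cluster-wide Proxy object might look like the following sketch. The proxy endpoints and CIDRs are hypothetical; `user-ca-bundle` echoes the example ConfigMap named in the trustedCA documentation:

apiVersion: config.openshift.io/v1
kind: Proxy
metadata:
  name: cluster                       # the canonical name, per the schema description
spec:
  httpProxy: http://proxy.example.com:3128
  httpsProxy: http://proxy.example.com:3128
  noProxy: .cluster.local,.svc,10.0.0.0/16
  readinessEndpoints:
  - http://www.example.com            # hypothetical readiness probe target
  trustedCA:
    name: user-ca-bundle              # ConfigMap in the openshift-config namespace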
+ type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml new file mode 100644 index 000000000..3ff78377a --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml @@ -0,0 +1,177 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: apiservers.config.openshift.io +spec: + group: config.openshift.io + names: + kind: APIServer + listKind: APIServerList + plural: apiservers + singular: apiserver + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "APIServer holds configuration (like serving certificates, client CA and CORS domains) shared by all API servers in the system, among them especially kube-apiserver and openshift-apiserver. The canonical name of an instance is 'cluster'. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + additionalCORSAllowedOrigins: + description: additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language. + type: array + items: + type: string + audit: + description: audit specifies the settings for audit configuration to be applied to all OpenShift-provided API servers in the cluster. + type: object + default: + profile: Default + properties: + customRules: + description: customRules specify profiles per group. These profiles take precedence over the top-level profile field if they apply. They are evaluated from top to bottom and the first one that matches applies. + type: array + items: + description: AuditCustomRule describes a custom rule for an audit profile that takes precedence over the top-level profile. + type: object + required: + - group + - profile + properties: + group: + description: group is the name of a group a request user must be a member of in order for this profile to apply.
+ type: string + minLength: 1 + profile: + description: "profile specifies the name of the desired audit policy configuration to be deployed to all OpenShift-provided API servers in the cluster. \n The following profiles are provided: - Default: the existing default policy. - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. \n If unset, the 'Default' profile is used as the default." + type: string + enum: + - Default + - WriteRequestBodies + - AllRequestBodies + - None + x-kubernetes-list-map-keys: + - group + x-kubernetes-list-type: map + profile: + description: "profile specifies the name of the desired top-level audit profile to be applied to all requests sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver, openshift-apiserver and oauth-apiserver), with the exception of those requests that match one or more of the customRules. \n The following profiles are provided: - Default: default policy which means MetaData level logging with the exception of events (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody level). - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. \n Warning: It is not recommended to disable audit logging by using the `None` profile unless you are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues. If you disable audit logging and a support situation arises, you might need to enable audit logging and reproduce the issue in order to troubleshoot properly. \n If unset, the 'Default' profile is used as the default." + type: string + default: Default + enum: + - Default + - WriteRequestBodies + - AllRequestBodies + - None + clientCA: + description: 'clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data["ca-bundle.crt"] - CA bundle.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + encryption: + description: encryption allows the configuration of encryption of resources at the datastore layer. + type: object + properties: + type: + description: "type defines what encryption type should be used to encrypt resources at the datastore layer. When this field is unset (i.e. when it is set to the empty string), identity is implied. The behavior of unset can and will change over time. Even if encryption is enabled by default, the meaning of unset may change to a different encryption type based on changes in best practices. 
\n When encryption is enabled, all sensitive resources shipped with the platform are encrypted. This list of sensitive resources can and will change over time. The current authoritative list is: \n 1. secrets 2. configmaps 3. routes.route.openshift.io 4. oauthaccesstokens.oauth.openshift.io 5. oauthauthorizetokens.oauth.openshift.io" + type: string + enum: + - "" + - identity + - aescbc + servingCerts: + description: servingCerts is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic. + type: object + properties: + namedCertificates: + description: namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames. If no named certificates are provided, or no named certificates match the server name as understood by a client, the defaultServingCertificate will be used. + type: array + items: + description: APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate. + type: object + properties: + names: + description: names is an optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates. Exact names take precedence over wildcard names. Explicit names defined here take precedence over extracted implicit names. + type: array + items: + type: string + servingCertificate: + description: 'servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic. The secret must exist in the openshift-config namespace and contain the following required fields: - Secret.Data["tls.key"] - TLS private key. - Secret.Data["tls.crt"] - TLS certificate.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + tlsSecurityProfile: + description: "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. \n If unset, a default (which may change between releases) is chosen. Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12." + type: object + properties: + custom: + description: "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this: \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: TLSv1.1" + type: object + properties: + ciphers: + description: "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" + type: array + items: + type: string + minTLSVersion: + description: "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake.
For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion: TLSv1.1 \n NOTE: currently the highest minTLSVersion allowed is VersionTLS12" + type: string + enum: + - VersionTLS10 + - VersionTLS11 + - VersionTLS12 + - VersionTLS13 + nullable: true + intermediate: + description: "intermediate is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 minTLSVersion: TLSv1.2" + type: object + nullable: true + modern: + description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported." + type: object + nullable: true + old: + description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion: TLSv1.0" + type: object + nullable: true + type: + description: "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. Old, Intermediate and Modern are TLS security profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations \n The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be reduced. \n Note that the Modern profile is currently not supported because it is not yet well adopted by common software libraries." + type: string + enum: + - Old + - Intermediate + - Modern + - Custom + status: + description: status holds observed values from the cluster. They may not be overridden. 
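As an illustration of the audit, encryption, and TLS profile knobs described above, a minimal APIServer object might look like this sketch; the chosen values are just one valid combination from the documented enums:

apiVersion: config.openshift.io/v1
kind: APIServer
metadata:
  name: cluster                 # the canonical name, per the schema description
spec:
  audit:
    profile: WriteRequestBodies # one of Default, WriteRequestBodies, AllRequestBodies, None
  encryption:
    type: aescbc                # one of "", identity, aescbc
  tlsSecurityProfile:
    type: Intermediate          # Modern is documented as currently unsupported
    intermediate: {}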
+ type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml new file mode 100644 index 000000000..bb695bac7 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml @@ -0,0 +1,101 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: authentications.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Authentication + listKind: AuthenticationList + plural: authentications + singular: authentication + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + oauthMetadata: + description: 'oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. This discovery document can be viewed from its served location: oc get --raw ''/.well-known/oauth-authorization-server'' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key "oauthMetadata" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + serviceAccountIssuer: + description: 'serviceAccountIssuer is the identifier of the bound service account token issuer. The default is https://kubernetes.default.svc WARNING: Updating this field will result in the invalidation of all bound tokens with the previous issuer value. 
Unless the holder of a bound token has explicit support for a change in issuer, they will not request a new bound token until pod restart or until their existing token exceeds 80% of its duration.' + type: string + type: + description: type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth. + type: string + webhookTokenAuthenticator: + description: webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service. + type: object + required: + - kubeConfig + properties: + kubeConfig: + description: "kubeConfig references a secret that contains kube config file data which describes how to access the remote webhook service. The namespace for the referenced secret is openshift-config. \n For further details, see: \n https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication \n The key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored." + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + webhookTokenAuthenticators: + description: webhookTokenAuthenticators is DEPRECATED, setting it has no effect. + type: array + items: + description: deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field. + type: object + properties: + kubeConfig: + description: 'kubeConfig contains kube config file data which describes how to access the remote webhook service. For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication The key "kubeConfig" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored. The namespace for this secret is determined by the point of use.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + integratedOAuthMetadata: + description: 'integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw ''/.well-known/oauth-authorization-server'' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key "oauthMetadata" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. 
The namespace for this config map is openshift-config-managed.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml new file mode 100644 index 000000000..f67be27db --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml @@ -0,0 +1,271 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: builds.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Build + listKind: BuildList + plural: builds + singular: build + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Build configures the behavior of OpenShift builds for the entire cluster. This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds. \n The canonical name is \"cluster\" \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec holds user-settable values for the build controller configuration + type: object + properties: + additionalTrustedCA: + description: "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config. \n DEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead." + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + buildDefaults: + description: BuildDefaults controls the default information for Builds + type: object + properties: + defaultProxy: + description: "DefaultProxy contains the default proxy settings for all build operations, including image pull/push and source download. \n Values can be overridden by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build config's strategy." + type: object + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var.
+ type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var. + type: string + noProxy: + description: noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. Empty means unset and will not result in an env var. + type: string + readinessEndpoints: + description: readinessEndpoints is a list of endpoints used to verify readiness of the proxy. + type: array + items: + type: string + trustedCA: + description: "trustedCA is a reference to a ConfigMap containing a CA certificate bundle. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from the required key \"ca-bundle.crt\", merging it with the system default trust bundle, and writing the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. Clients that expect to make proxy connections must use the trusted-ca-bundle for all HTTPS requests to the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as well. \n The namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml): \n apiVersion: v1 kind: ConfigMap metadata: name: user-ca-bundle namespace: openshift-config data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom CA certificate bundle. -----END CERTIFICATE-----" + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + env: + description: Env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build + type: array + items: + description: EnvVar represents an environment variable present in a Container. + type: object + required: + - name + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + type: object + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + type: object + required: + - key + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' 
+ type: object + required: + - fieldPath + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' + type: object + required: + - resource + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + type: object + required: + - key + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + gitProxy: + description: "GitProxy contains the proxy settings for git operations only. If set, this will override any Proxy settings for all git commands, such as git clone. \n Values that are not set here will be inherited from DefaultProxy." + type: object + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var. + type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var. + type: string + noProxy: + description: noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. Empty means unset and will not result in an env var. + type: string + readinessEndpoints: + description: readinessEndpoints is a list of endpoints used to verify readiness of the proxy. + type: array + items: + type: string + trustedCA: + description: "trustedCA is a reference to a ConfigMap containing a CA certificate bundle. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from the required key \"ca-bundle.crt\", merging it with the system default trust bundle, and writing the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. Clients that expect to make proxy connections must use the trusted-ca-bundle for all HTTPS requests to the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as well. \n The namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml): \n apiVersion: v1 kind: ConfigMap metadata: name: user-ca-bundle namespace: openshift-config data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- Custom CA certificate bundle. 
-----END CERTIFICATE-----" + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + imageLabels: + description: ImageLabels is a list of docker labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig. + type: array + items: + type: object + properties: + name: + description: Name defines the name of the label. It must have non-zero length. + type: string + value: + description: Value defines the literal value of the label. + type: string + resources: + description: Resources defines resource requirements to execute the build. + type: object + properties: + limits: + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + requests: + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + buildOverrides: + description: BuildOverrides controls override settings for builds + type: object + properties: + forcePull: + description: ForcePull overrides, if set, the equivalent value in the builds, i.e. false disables force pull for all builds, true enables force pull for all builds, independently of what each build specifies itself + type: boolean + imageLabels: + description: ImageLabels is a list of docker labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten. + type: array + items: + type: object + properties: + name: + description: Name defines the name of the label. It must have non-zero length. + type: string + value: + description: Value defines the literal value of the label. + type: string + nodeSelector: + description: NodeSelector is a selector which must be true for the build pod to fit on a node + type: object + additionalProperties: + type: string + tolerations: + description: Tolerations is a list of Tolerations that will override any existing tolerations set on a build pod. + type: array + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. 
+ type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml new file mode 100644 index 000000000..188b45e01 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml @@ -0,0 +1,57 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: consoles.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Console + listKind: ConsoleList + plural: consoles + singular: console + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Console holds cluster-wide configuration for the web console, including the logout URL, and reports the public URL of the console. The canonical name is `cluster`. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + authentication: + description: ConsoleAuthentication defines a list of optional configuration for console authentication. + type: object + properties: + logoutRedirect: + description: 'An optional, absolute URL to redirect web browsers to after logging out of the console. If not specified, it will redirect to the default login page. 
This is required when using an identity provider that supports single sign-on (SSO) such as: - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, SAML) - OAuth (GitHub, GitLab, Google) Logging out of the console will destroy the user''s token. The logoutRedirect provides the user the option to perform single logout (SLO) through the identity provider to destroy their single sign-on session.' + type: string + pattern: ^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$ + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + consoleURL: + description: The URL for the console. This will be derived from the host for the route that is created for the console. + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml new file mode 100644 index 000000000..e4fa56eee --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml @@ -0,0 +1,72 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: dnses.config.openshift.io +spec: + group: config.openshift.io + names: + kind: DNS + listKind: DNSList + plural: dnses + singular: dns + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "DNS holds cluster-wide information about DNS. The canonical name is `cluster` \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + baseDomain: + description: "baseDomain is the base domain of the cluster. All managed DNS records will be sub-domains of this base. \n For example, given the base domain `openshift.example.com`, an API server DNS record may be created for `cluster-api.openshift.example.com`. \n Once set, this field cannot be changed." + type: string + privateZone: + description: "privateZone is the location where all the DNS records that are only available internally to the cluster exist. \n If this field is nil, no private records should be created. \n Once set, this field cannot be changed." + type: object + properties: + id: + description: "id is the identifier that can be used to find the DNS hosted zone. 
\n on AWS zone can be fetched using `ID` as id in [1] on Azure zone can be fetched using `ID` as a pre-determined name in [2], on GCP zone can be fetched using `ID` as a pre-determined name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" + type: string + tags: + description: "tags can be used to query the DNS hosted zone. \n on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" + type: object + additionalProperties: + type: string + publicZone: + description: "publicZone is the location where all the DNS records that are publicly accessible to the internet exist. \n If this field is nil, no public records should be created. \n Once set, this field cannot be changed." + type: object + properties: + id: + description: "id is the identifier that can be used to find the DNS hosted zone. \n on AWS zone can be fetched using `ID` as id in [1] on Azure zone can be fetched using `ID` as a pre-determined name in [2], on GCP zone can be fetched using `ID` as a pre-determined name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" + type: string + tags: + description: "tags can be used to query the DNS hosted zone. \n on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" + type: object + additionalProperties: + type: string + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml new file mode 100644 index 000000000..5254d0ce2 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml @@ -0,0 +1,63 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: featuregates.config.openshift.io +spec: + group: config.openshift.io + names: + kind: FeatureGate + listKind: FeatureGateList + plural: featuregates + singular: featuregate + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Feature holds cluster-wide information about feature gates. The canonical name is `cluster` \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + customNoUpgrade: + description: customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations your cluster may fail in an unrecoverable way. featureSet must equal "CustomNoUpgrade" must be set to use this field. + type: object + properties: + disabled: + description: disabled is a list of all feature gates that you want to force off + type: array + items: + type: string + enabled: + description: enabled is a list of all feature gates that you want to force on + type: array + items: + type: string + nullable: true + featureSet: + description: featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting. Turning on or off features may cause irreversible changes in your cluster which cannot be undone. + type: string + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml new file mode 100644 index 000000000..a160fef40 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml @@ -0,0 +1,108 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: images.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Image + listKind: ImageList + plural: images + singular: image + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Image governs policies related to imagestream imports and runtime configuration for external registries. It allows cluster admins to configure which registries OpenShift is allowed to import images from, extra CA trust bundles for external registries, and policies to block or allow registry hostnames. When exposing OpenShift's image registry to the public, this also lets cluster admins specify the external hostname. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + additionalTrustedCA: + description: additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import, pod image pull, build image pull, and imageregistry pullthrough. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + allowedRegistriesForImport: + description: allowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions. + type: array + items: + description: RegistryLocation contains a location of the registry specified by the registry domain name. The domain name might include wildcards, like '*' or '??'. + type: object + properties: + domainName: + description: domainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well. + type: string + insecure: + description: insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure. + type: boolean + externalRegistryHostnames: + description: externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in "hostname[:port]" format. + type: array + items: + type: string + registrySources: + description: registrySources contains configuration that determines how the container runtime should treat individual registries when accessing images for builds+pods. (e.g. whether or not to allow insecure access). It does not contain configuration for the internal cluster registry. + type: object + properties: + allowedRegistries: + description: "allowedRegistries are the only registries permitted for image pull and push actions. All other registries are denied. \n Only one of BlockedRegistries or AllowedRegistries may be set." + type: array + items: + type: string + blockedRegistries: + description: "blockedRegistries cannot be used for image pull and push actions. All other registries are permitted. \n Only one of BlockedRegistries or AllowedRegistries may be set." 
+ type: array + items: + type: string + containerRuntimeSearchRegistries: + description: 'containerRuntimeSearchRegistries are registries that will be searched when pulling images that do not have fully qualified domains in their pull specs. Registries will be searched in the order provided in the list. Note: this search list only works with the container runtime, i.e CRI-O. Will NOT work with builds or imagestream imports.' + type: array + format: hostname + minItems: 1 + items: + type: string + x-kubernetes-list-type: set + insecureRegistries: + description: insecureRegistries are registries which do not have a valid TLS certificates or only support HTTP connections. + type: array + items: + type: string + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + externalRegistryHostnames: + description: externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in "hostname[:port]" format. + type: array + items: + type: string + internalRegistryHostname: + description: internalRegistryHostname sets the hostname for the default internal image registry. The value must be in "hostname[:port]" format. This value is set by the image registry operator which controls the internal registry hostname. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable. + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagecontentpolicy.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagecontentpolicy.crd.yaml new file mode 100644 index 000000000..147c73c44 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagecontentpolicy.crd.yaml @@ -0,0 +1,68 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/874 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: imagecontentpolicies.config.openshift.io +spec: + group: config.openshift.io + names: + kind: ImageContentPolicy + listKind: ImageContentPolicyList + plural: imagecontentpolicies + singular: imagecontentpolicy + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ImageContentPolicy holds cluster-wide information about how to handle registry mirror rules. When multiple policies are defined, the outcome of the behavior is defined on each field. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + repositoryDigestMirrors: + description: "repositoryDigestMirrors allows images referenced by image digests in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in RepositoryDigestMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. To pull image from mirrors by tags, should set the \"allowMirrorByTags\". \n Each “source” repository is treated independently; configurations for different “source” repositories don’t interact. \n If the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec. \n When multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified." + type: array + items: + description: RepositoryDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config. + type: object + required: + - source + properties: + allowMirrorByTags: + description: allowMirrorByTags if true, the mirrors can be used to pull the images that are referenced by their tags. Default is false, the mirrors only work when pulling the images that are referenced by their digests. Pulling images by tag can potentially yield different images, depending on which endpoint we pull from. Forcing digest-pulls for mirrors avoids that issue. + type: boolean + mirrors: + description: mirrors is zero or more repositories that may also contain the same images. If the "mirrors" is not specified, the image will continue to be pulled from the specified repository in the pull spec. No mirror will be configured. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. Other cluster configuration, including (but not limited to) other repositoryDigestMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering. + type: array + items: + type: string + pattern: ^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$ + x-kubernetes-list-type: set + source: + description: source is the repository that users refer to, e.g. in image pull specifications. 
+ type: string + pattern: ^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$ + x-kubernetes-list-map-keys: + - source + x-kubernetes-list-type: map + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml new file mode 100644 index 000000000..6be6c6812 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml @@ -0,0 +1,461 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: infrastructures.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Infrastructure + listKind: InfrastructureList + plural: infrastructures + singular: infrastructure + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster` \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + cloudConfig: + description: "cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file. This configuration file is used to configure the Kubernetes cloud provider integration when using the built-in cloud provider integration or the external cloud controller manager. The namespace for this config map is openshift-config. \n cloudConfig should only be consumed by the kube_cloud_config controller. The controller is responsible for using the user configuration in the spec for various platforms and combining that with the user provided ConfigMap in this field to create a stitched kube cloud config. The controller generates a ConfigMap `kube-cloud-config` in `openshift-config-managed` namespace with the kube cloud config is stored in `cloud.conf` key. All the clients are expected to use the generated ConfigMap only." + type: object + properties: + key: + description: Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. 
+ type: string + name: + type: string + platformSpec: + description: platformSpec holds desired information specific to the underlying infrastructure provider. + type: object + properties: + alibabaCloud: + description: AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. + type: object + aws: + description: AWS contains settings specific to the Amazon Web Services infrastructure provider. + type: object + properties: + serviceEndpoints: + description: serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service. + type: array + items: + description: AWSServiceEndpoint store the configuration of a custom url to override existing defaults of AWS Services. + type: object + properties: + name: + description: name is the name of the AWS service. The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html This must be provided and cannot be empty. + type: string + pattern: ^[a-z0-9-]+$ + url: + description: url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. + type: string + pattern: ^https:// + azure: + description: Azure contains settings specific to the Azure infrastructure provider. + type: object + baremetal: + description: BareMetal contains settings specific to the BareMetal platform. + type: object + equinixMetal: + description: EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. + type: object + gcp: + description: GCP contains settings specific to the Google Cloud Platform infrastructure provider. + type: object + ibmcloud: + description: IBMCloud contains settings specific to the IBMCloud infrastructure provider. + type: object + kubevirt: + description: Kubevirt contains settings specific to the kubevirt infrastructure provider. + type: object + openstack: + description: OpenStack contains settings specific to the OpenStack infrastructure provider. + type: object + ovirt: + description: Ovirt contains settings specific to the oVirt infrastructure provider. + type: object + powervs: + description: PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. + type: object + properties: + serviceEndpoints: + description: serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service. + type: array + items: + description: PowervsServiceEndpoint stores the configuration of a custom url to override existing defaults of PowerVS Services. + type: object + required: + - name + - url + properties: + name: + description: name is the name of the Power VS service. Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud + type: string + pattern: ^[a-z0-9-]+$ + url: + description: url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. + type: string + format: uri + pattern: ^https:// + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: + description: type is the underlying infrastructure provider for the cluster. 
This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", "AlibabaCloud" and "None". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform. + type: string + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + vsphere: + description: VSphere contains settings specific to the VSphere infrastructure provider. + type: object + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + apiServerInternalURI: + description: apiServerInternalURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components like kubelets, to contact the Kubernetes API server using the infrastructure provider rather than Kubernetes networking. + type: string + apiServerURL: + description: apiServerURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerURL can be used by components like the web console to tell users where to find the Kubernetes API. + type: string + controlPlaneTopology: + description: controlPlaneTopology expresses the expectations for operands that normally run on control nodes. The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster. + type: string + default: HighlyAvailable + enum: + - HighlyAvailable + - SingleReplica + - External + etcdDiscoveryDomain: + description: 'etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release.' + type: string + infrastructureName: + description: infrastructureName uniquely identifies a cluster with a human friendly name. Once set it should not be changed. Must be of max length 27 and must have only alphanumeric or hyphen characters. + type: string + infrastructureTopology: + description: 'infrastructureTopology expresses the expectations for infrastructure services that do not run on control plane nodes, usually indicated by a node selector for a `role` value other than `master`. The default is ''HighlyAvailable'', which represents the behavior operators have in a "normal" cluster. The ''SingleReplica'' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation NOTE: External topology mode is not applicable for this field.' 
+ type: string + default: HighlyAvailable + enum: + - HighlyAvailable + - SingleReplica + platform: + description: "platform is the underlying infrastructure provider for the cluster. \n Deprecated: Use platformStatus.type instead." + type: string + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + platformStatus: + description: platformStatus holds status information specific to the underlying infrastructure provider. + type: object + properties: + alibabaCloud: + description: AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. + type: object + required: + - region + properties: + region: + description: region specifies the region for Alibaba Cloud resources created for the cluster. + type: string + pattern: ^[0-9A-Za-z-]+$ + resourceGroupID: + description: resourceGroupID is the ID of the resource group for the cluster. + type: string + pattern: ^(rg-[0-9A-Za-z]+)?$ + resourceTags: + description: resourceTags is a list of additional tags to apply to Alibaba Cloud resources created for the cluster. + type: array + maxItems: 20 + items: + description: AlibabaCloudResourceTag is the set of tags to add to apply to resources. + type: object + required: + - key + - value + properties: + key: + description: key is the key of the tag. + type: string + maxLength: 128 + minLength: 1 + value: + description: value is the value of the tag. + type: string + maxLength: 128 + minLength: 1 + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + aws: + description: AWS contains settings specific to the Amazon Web Services infrastructure provider. + type: object + properties: + region: + description: region holds the default AWS region for new AWS resources created by the cluster. + type: string + resourceTags: + description: resourceTags is a list of additional tags to apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags available for the user. + type: array + maxItems: 25 + items: + description: AWSResourceTag is a tag to apply to AWS resources created for the cluster. + type: object + required: + - key + - value + properties: + key: + description: key is the key of the tag + type: string + maxLength: 128 + minLength: 1 + pattern: ^[0-9A-Za-z_.:/=+-@]+$ + value: + description: value is the value of the tag. Some AWS service do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services. + type: string + maxLength: 256 + minLength: 1 + pattern: ^[0-9A-Za-z_.:/=+-@]+$ + serviceEndpoints: + description: ServiceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service. + type: array + items: + description: AWSServiceEndpoint store the configuration of a custom url to override existing defaults of AWS Services. + type: object + properties: + name: + description: name is the name of the AWS service. The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html This must be provided and cannot be empty. 
+ type: string + pattern: ^[a-z0-9-]+$ + url: + description: url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. + type: string + pattern: ^https:// + azure: + description: Azure contains settings specific to the Azure infrastructure provider. + type: object + properties: + armEndpoint: + description: armEndpoint specifies a URL to use for resource management in non-soverign clouds such as Azure Stack. + type: string + cloudName: + description: cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK with the appropriate Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`. + type: string + enum: + - "" + - AzurePublicCloud + - AzureUSGovernmentCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureStackCloud + networkResourceGroupName: + description: networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. If empty, the value is same as ResourceGroupName. + type: string + resourceGroupName: + description: resourceGroupName is the Resource Group for new Azure resources created for the cluster. + type: string + baremetal: + description: BareMetal contains settings specific to the BareMetal platform. + type: object + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + type: string + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for BareMetal deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. + type: string + equinixMetal: + description: EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. + type: object + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + type: string + gcp: + description: GCP contains settings specific to the Google Cloud Platform infrastructure provider. + type: object + properties: + projectID: + description: resourceGroupName is the Project ID for new GCP resources created for the cluster. + type: string + region: + description: region holds the region for new GCP resources created for the cluster. 
+ type: string + ibmcloud: + description: IBMCloud contains settings specific to the IBMCloud infrastructure provider. + type: object + properties: + cisInstanceCRN: + description: CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain + type: string + location: + description: Location is where the cluster has been deployed + type: string + providerType: + description: ProviderType indicates the type of cluster that was created + type: string + resourceGroupName: + description: ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster. + type: string + kubevirt: + description: Kubevirt contains settings specific to the kubevirt infrastructure provider. + type: object + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + type: string + openstack: + description: OpenStack contains settings specific to the OpenStack infrastructure provider. + type: object + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + type: string + cloudName: + description: cloudName is the name of the desired OpenStack cloud in the client configuration file (`clouds.yaml`). + type: string + ingressIP: + description: ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + type: string + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for OpenStack deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. + type: string + ovirt: + description: Ovirt contains settings specific to the oVirt infrastructure provider. + type: object + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. 
+ type: string + nodeDNSIP: + description: 'deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release.' + type: string + powervs: + description: PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider. + type: object + properties: + cisInstanceCRN: + description: CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain + type: string + region: + description: region holds the default Power VS region for new Power VS resources created by the cluster. + type: string + serviceEndpoints: + description: serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service. + type: array + items: + description: PowervsServiceEndpoint stores the configuration of a custom url to override existing defaults of PowerVS Services. + type: object + required: + - name + - url + properties: + name: + description: name is the name of the Power VS service. Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud + type: string + pattern: ^[a-z0-9-]+$ + url: + description: url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. + type: string + format: uri + pattern: ^https:// + zone: + description: 'zone holds the default zone for the new Power VS resources created by the cluster. Note: Currently only single-zone OCP clusters are supported' + type: string + type: + description: "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform. \n This value will be synced with to the `status.platform` and `status.platformStatus.type`. Currently this value cannot be changed once set." + type: string + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + vsphere: + description: VSphere contains settings specific to the VSphere infrastructure provider. + type: object + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + type: string + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal DNS used by the nodes. 
Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for vSphere deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml new file mode 100644 index 000000000..95fe8dfd9 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml @@ -0,0 +1,274 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: ingresses.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Ingress + listKind: IngressList + plural: ingresses + singular: ingress + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Ingress holds cluster-wide information about ingress, including the default ingress domain used for routes. The canonical name is `cluster`. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + appsDomain: + description: appsDomain is an optional domain to use instead of the one specified in the domain field when a Route is created without specifying an explicit host. If appsDomain is nonempty, this value is used to generate default host values for Route. Unlike domain, appsDomain may be modified after installation. This assumes a new ingresscontroller has been setup with a wildcard certificate. + type: string + componentRoutes: + description: "componentRoutes is an optional list of routes that are managed by OpenShift components that a cluster-admin is able to configure the hostname and serving certificate for. The namespace and name of each route in this list should match an existing entry in the status.componentRoutes list. \n To determine the set of configurable Routes, look at namespace and name of entries in the .status.componentRoutes list, where participating operators write the status of configurable routes." + type: array + items: + description: ComponentRouteSpec allows for configuration of a route's hostname and serving certificate. 
+ type: object + required: + - hostname + - name + - namespace + properties: + hostname: + description: hostname is the hostname that should be used by the route. + type: string + format: hostname + name: + description: "name is the logical name of the route to customize. \n The namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized." + type: string + maxLength: 256 + minLength: 1 + namespace: + description: "namespace is the namespace of the route to customize. \n The namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized." + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + servingCertKeyPairSecret: + description: servingCertKeyPairSecret is a reference to a secret of type `kubernetes.io/tls` in the openshift-config namespace. The serving cert/key pair must match and will be used by the operator to fulfill the intent of serving with this name. If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + domain: + description: "domain is used to generate a default host name for a route when the route's host name is empty. The generated host name will follow this pattern: \"..\". \n It is also used as the default wildcard domain suffix for ingress. The default ingresscontroller domain will follow this pattern: \"*.\". \n Once set, changing domain is not currently supported." + type: string + requiredHSTSPolicies: + description: "requiredHSTSPolicies specifies HSTS policies that are required to be set on newly created or updated routes matching the domainPattern/s and namespaceSelector/s that are specified in the policy. Each requiredHSTSPolicy must have at least a domainPattern and a maxAge to validate a route HSTS Policy route annotation, and affect route admission. \n A candidate route is checked for HSTS Policies if it has the HSTS Policy route annotation: \"haproxy.router.openshift.io/hsts_header\" E.g. haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains \n - For each candidate route, if it matches a requiredHSTSPolicy domainPattern and optional namespaceSelector, then the maxAge, preloadPolicy, and includeSubdomainsPolicy must be valid to be admitted. Otherwise, the route is rejected. - The first match, by domainPattern and optional namespaceSelector, in the ordering of the RequiredHSTSPolicies determines the route's admission status. - If the candidate route doesn't match any requiredHSTSPolicy domainPattern and optional namespaceSelector, then it may use any HSTS Policy annotation. \n The HSTS policy configuration may be changed after routes have already been created. An update to a previously admitted route may then fail if the updated route does not conform to the updated HSTS policy configuration. However, changing the HSTS policy configuration will not cause a route that is already admitted to stop working. \n Note that if there are no RequiredHSTSPolicies, any HSTS Policy annotation on the route is valid." + type: array + items: + type: object + required: + - domainPatterns + properties: + domainPatterns: + description: "domainPatterns is a list of domains for which the desired HSTS annotations are required. 
If domainPatterns is specified and a route is created with a spec.host matching one of the domains, the route must specify the HSTS Policy components described in the matching RequiredHSTSPolicy. \n The use of wildcards is allowed like this: *.foo.com matches everything under foo.com. foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*." + type: array + minItems: 1 + items: + type: string + includeSubDomainsPolicy: + description: 'includeSubDomainsPolicy means the HSTS Policy should apply to any subdomains of the host''s domain name. Thus, for the host bar.foo.com, if includeSubDomainsPolicy was set to RequireIncludeSubDomains: - the host app.bar.foo.com would inherit the HSTS Policy of bar.foo.com - the host bar.foo.com would inherit the HSTS Policy of bar.foo.com - the host foo.com would NOT inherit the HSTS Policy of bar.foo.com - the host def.foo.com would NOT inherit the HSTS Policy of bar.foo.com' + type: string + enum: + - RequireIncludeSubDomains + - RequireNoIncludeSubDomains + - NoOpinion + maxAge: + description: maxAge is the delta time range in seconds during which hosts are regarded as HSTS hosts. If set to 0, it negates the effect, and hosts are removed as HSTS hosts. If set to 0 and includeSubdomains is specified, all subdomains of the host are also removed as HSTS hosts. maxAge is a time-to-live value, and if this policy is not refreshed on a client, the HSTS policy will eventually expire on that client. + type: object + properties: + largestMaxAge: + description: The largest allowed value (in seconds) of the RequiredHSTSPolicy max-age This value can be left unspecified, in which case no upper limit is enforced. + type: integer + format: int32 + maximum: 2147483647 + minimum: 0 + smallestMaxAge: + description: The smallest allowed value (in seconds) of the RequiredHSTSPolicy max-age Setting max-age=0 allows the deletion of an existing HSTS header from a host. This is a necessary tool for administrators to quickly correct mistakes. This value can be left unspecified, in which case no lower limit is enforced. + type: integer + format: int32 + maximum: 2147483647 + minimum: 0 + namespaceSelector: + description: namespaceSelector specifies a label selector such that the policy applies only to those routes that are in namespaces with labels that match the selector, and are in one of the DomainPatterns. Defaults to the empty LabelSelector, which matches everything. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + preloadPolicy: + description: preloadPolicy directs the client to include hosts in its host preload list so that it never needs to do an initial load to get the HSTS header (note that this is not defined in RFC 6797 and is therefore client implementation-dependent). + type: string + enum: + - RequirePreload + - RequireNoPreload + - NoOpinion + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + componentRoutes: + description: componentRoutes is where participating operators place the current route status for routes whose hostnames and serving certificates can be customized by the cluster-admin. + type: array + items: + description: ComponentRouteStatus contains information allowing configuration of a route's hostname and serving certificate. + type: object + required: + - defaultHostname + - name + - namespace + - relatedObjects + properties: + conditions: + description: "conditions are used to communicate the state of the componentRoutes entry. \n Supported conditions include Available, Degraded and Progressing. \n If available is true, the content served by the route can be accessed by users. This includes cases where a default may continue to serve content while the customized route specified by the cluster-admin is being configured. \n If Degraded is true, that means something has gone wrong trying to handle the componentRoutes entry. The currentHostnames field may or may not be in effect. \n If Progressing is true, that means the component is taking some action related to the componentRoutes entry." + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. 
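A note for readers tracing the requiredHSTSPolicies schema earlier in this file: admission validates the route's "haproxy.router.openshift.io/hsts_header" annotation against the policy's maxAge bounds. The Go sketch below shows one way to pull max-age out of such an annotation value; it assumes only the semicolon-separated directive format quoted in the description, and parseHSTSMaxAge is an illustrative helper, not code from this patch.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseHSTSMaxAge extracts the max-age directive, in seconds, from an HSTS
// annotation value such as "max-age=31536000;preload;includeSubDomains".
func parseHSTSMaxAge(header string) (int32, error) {
	for _, directive := range strings.Split(header, ";") {
		directive = strings.TrimSpace(directive)
		if !strings.HasPrefix(strings.ToLower(directive), "max-age=") {
			continue
		}
		seconds, err := strconv.ParseInt(directive[len("max-age="):], 10, 32)
		if err != nil {
			return 0, fmt.Errorf("invalid max-age: %w", err)
		}
		return int32(seconds), nil
	}
	return 0, fmt.Errorf("no max-age directive in %q", header)
}

func main() {
	seconds, err := parseHSTSMaxAge("max-age=31536000;preload;includeSubDomains")
	fmt.Println(seconds, err) // 31536000 <nil>
}

A value recovered this way is what the smallestMaxAge and largestMaxAge bounds described above would constrain.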
+ type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + consumingUsers: + description: consumingUsers is a slice of ServiceAccounts that need to have read permission on the servingCertKeyPairSecret secret. + type: array + maxItems: 5 + items: + description: ConsumingUser is an alias for string which we add validation to. Currently only service accounts are supported. + type: string + maxLength: 512 + minLength: 1 + pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + currentHostnames: + description: currentHostnames is the list of current names used by the route. Typically, this list should consist of a single hostname, but if multiple hostnames are supported by the route the operator may write multiple entries to this list. + type: array + minItems: 1 + items: + description: Hostname is an alias for hostname string validation. + type: string + format: hostname + defaultHostname: + description: defaultHostname is the hostname of this route prior to customization. + type: string + format: hostname + name: + description: "name is the logical name of the route to customize. It does not have to be the actual name of a route resource but it cannot be renamed. \n The namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized." + type: string + maxLength: 256 + minLength: 1 + namespace: + description: "namespace is the namespace of the route to customize. It must be a real namespace. Using an actual namespace ensures that no two components will conflict and the same component can be installed multiple times. \n The namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized." + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + relatedObjects: + description: relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied. + type: array + minItems: 1 + items: + description: ObjectReference contains enough information to let you inspect or modify the referred object. + type: object + required: + - group + - name + - resource + properties: + group: + description: group of the referent. + type: string + name: + description: name of the referent. 
+ type: string + namespace: + description: namespace of the referent. + type: string + resource: + description: resource of the referent. + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml new file mode 100644 index 000000000..c01178506 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml @@ -0,0 +1,163 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: networks.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Network + listKind: NetworkList + plural: networks + singular: network + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. Please view network.spec for an explanation on what applies when configuring this resource. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. + type: object + properties: + clusterNetwork: + description: IP address pool to use for pod IPs. This field is immutable after installation. + type: array + items: + description: ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated. + type: object + properties: + cidr: + description: The complete block for pod IPs. + type: string + hostPrefix: + description: The size (prefix) of block to allocate to each node. If this field is not used by the plugin, it can be left unset. + type: integer + format: int32 + minimum: 0 + externalIP: + description: externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set. 
+ type: object + properties: + autoAssignCIDRs: + description: autoAssignCIDRs is a list of CIDRs from which to automatically assign Service.ExternalIP. These are assigned when the service is of type LoadBalancer. In general, this is only useful for bare-metal clusters. In Openshift 3.x, this was misleadingly called "IngressIPs". Automatically assigned External IPs are not affected by any ExternalIPPolicy rules. Currently, only one entry may be provided. + type: array + items: + type: string + policy: + description: policy is a set of restrictions applied to the ExternalIP field. If nil or empty, then ExternalIP is not allowed to be set. + type: object + properties: + allowedCIDRs: + description: allowedCIDRs is the list of allowed CIDRs. + type: array + items: + type: string + rejectedCIDRs: + description: rejectedCIDRs is the list of disallowed CIDRs. These take precedence over allowedCIDRs. + type: array + items: + type: string + networkType: + description: 'NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OpenShiftSDN This field is immutable after installation.' + type: string + serviceNetwork: + description: IP address pool for services. Currently, we only support a single entry here. This field is immutable after installation. + type: array + items: + type: string + serviceNodePortRange: + description: The port range allowed for Services of type NodePort. If not specified, the default of 30000-32767 will be used. Such Services without a NodePort specified will have one automatically allocated from this range. This parameter can be updated after the cluster is installed. + type: string + pattern: ^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + clusterNetwork: + description: IP address pool to use for pod IPs. + type: array + items: + description: ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated. + type: object + properties: + cidr: + description: The complete block for pod IPs. + type: string + hostPrefix: + description: The size (prefix) of block to allocate to each node. If this field is not used by the plugin, it can be left unset. + type: integer + format: int32 + minimum: 0 + clusterNetworkMTU: + description: ClusterNetworkMTU is the MTU for inter-pod networking. + type: integer + migration: + description: Migration contains the cluster network migration configuration. + type: object + properties: + mtu: + description: MTU contains the MTU migration configuration. + type: object + properties: + machine: + description: Machine contains MTU migration configuration for the machine's uplink. + type: object + properties: + from: + description: From is the MTU to migrate from. + type: integer + format: int32 + minimum: 0 + to: + description: To is the MTU to migrate to. + type: integer + format: int32 + minimum: 0 + network: + description: Network contains MTU migration configuration for the default network. + type: object + properties: + from: + description: From is the MTU to migrate from. + type: integer + format: int32 + minimum: 0 + to: + description: To is the MTU to migrate to. 
+ type: integer + format: int32 + minimum: 0 + networkType: + description: 'NetworkType is the target plugin that is to be deployed. Currently supported values are: OpenShiftSDN, OVNKubernetes' + type: string + enum: + - OpenShiftSDN + - OVNKubernetes + networkType: + description: NetworkType is the plugin that is deployed (e.g. OpenShiftSDN). + type: string + serviceNetwork: + description: IP address pool for services. Currently, we only support a single entry here. + type: array + items: + type: string + served: true + storage: true diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml new file mode 100644 index 000000000..883c623b3 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml @@ -0,0 +1,444 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: oauths.config.openshift.io +spec: + group: config.openshift.io + names: + kind: OAuth + listKind: OAuthList + plural: oauths + singular: oauth + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`. It is used to configure the integrated OAuth server. This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + identityProviders: + description: identityProviders is an ordered list of ways for a user to identify themselves. When this list is empty, no identities are provisioned for users. + type: array + items: + description: IdentityProvider provides identities for users authenticating using credentials + type: object + properties: + basicAuth: + description: basicAuth contains configuration options for the BasicAuth IdP + type: object + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. 
If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + tlsClientCert: + description: tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key "tls.crt" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + tlsClientKey: + description: tlsClientKey is an optional reference to a secret by name that contains the PEM-encoded TLS private key for the client certificate referenced in tlsClientCert. The key "tls.key" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + url: + description: url is the remote URL to connect to + type: string + github: + description: github enables user authentication using GitHub credentials + type: object + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + hostname: + description: hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value configured at /setup/settings#hostname. + type: string + organizations: + description: organizations optionally restricts which organizations are allowed to log in + type: array + items: + type: string + teams: + description: teams optionally restricts which teams are allowed to log in. Format is <org>/<team>.
+ type: array + items: + type: string + gitlab: + description: gitlab enables user authentication using GitLab credentials + type: object + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + url: + description: url is the oauth server base URL + type: string + google: + description: google enables user authentication using Google credentials + type: object + properties: + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + hostedDomain: + description: hostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to + type: string + htpasswd: + description: htpasswd enables user authentication using an HTPasswd file to validate credentials + type: object + properties: + fileData: + description: fileData is a required reference to a secret by name containing the data to use as the htpasswd file. The key "htpasswd" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. If the specified htpasswd data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + keystone: + description: keystone enables user authentication using keystone password credentials + type: object + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. 
+ type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + domainName: + description: domainName is required for keystone v3 + type: string + tlsClientCert: + description: tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key "tls.crt" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + tlsClientKey: + description: tlsClientKey is an optional reference to a secret by name that contains the PEM-encoded TLS private key for the client certificate referenced in tlsClientCert. The key "tls.key" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + url: + description: url is the remote URL to connect to + type: string + ldap: + description: ldap enables user authentication using LDAP credentials + type: object + properties: + attributes: + description: attributes maps LDAP attributes to identities + type: object + properties: + email: + description: email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity + type: array + items: + type: string + id: + description: id is the list of attributes whose values should be used as the user ID. Required. First non-empty attribute is used. At least one attribute is required. If none of the listed attribute have a value, authentication fails. LDAP standard identity attribute is "dn" + type: array + items: + type: string + name: + description: name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity LDAP standard display name attribute is "cn" + type: array + items: + type: string + preferredUsername: + description: preferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is "uid" + type: array + items: + type: string + bindDN: + description: bindDN is an optional DN to bind with during the search phase. + type: string + bindPassword: + description: bindPassword is an optional reference to a secret by name containing a password to bind with during the search phase. The key "bindPassword" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. 
If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + insecure: + description: 'insecure, if true, indicates the connection should not use TLS WARNING: Should not be set to `true` with the URL scheme "ldaps://" as "ldaps://" URLs always attempt to connect using TLS, even when `insecure` is set to `true` When `true`, "ldap://" URLS connect insecurely. When `false`, "ldap://" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830.' + type: boolean + url: + description: 'url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is: ldap://host:port/basedn?attribute?scope?filter' + type: string + mappingMethod: + description: mappingMethod determines how identities from this provider are mapped to users Defaults to "claim" + type: string + name: + description: 'name is used to qualify the identities returned by this provider. - It MUST be unique and not shared by any other identity provider used - It MUST be a valid path segment: name cannot equal "." or ".." or contain "/" or "%" or ":" Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName' + type: string + openID: + description: openID enables user authentication using OpenID credentials + type: object + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca.crt" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + claims: + description: claims mappings + type: object + properties: + email: + description: email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity + type: array + items: + type: string + x-kubernetes-list-type: atomic + groups: + description: groups is the list of claims value of which should be used to synchronize groups from the OIDC provider to OpenShift for the user. If multiple claims are specified, the first one with a non-empty value is used. + type: array + items: + description: OpenIDClaim represents a claim retrieved from an OpenID provider's tokens or userInfo responses + type: string + minLength: 1 + x-kubernetes-list-type: atomic + name: + description: name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity + type: array + items: + type: string + x-kubernetes-list-type: atomic + preferredUsername: + description: preferredUsername is the list of claims whose values should be used as the preferred username. 
If unspecified, the preferred username is determined from the value of the sub claim + type: array + items: + type: string + x-kubernetes-list-type: atomic + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the secret by name containing the oauth client secret. The key "clientSecret" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + extraAuthorizeParameters: + description: extraAuthorizeParameters are any custom parameters to add to the authorize request. + type: object + additionalProperties: + type: string + extraScopes: + description: extraScopes are any scopes to request in addition to the standard "openid" scope. + type: array + items: + type: string + issuer: + description: issuer is the URL that the OpenID Provider asserts as its Issuer Identifier. It must use the https scheme with no query or fragment component. + type: string + requestHeader: + description: requestHeader enables user authentication using request header credentials + type: object + properties: + ca: + description: ca is a required reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. Specifically, it allows verification of incoming requests to prevent header spoofing. The key "ca.crt" is used to locate the data. If the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + challengeURL: + description: challengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here. ${url} is replaced with the current URL, escaped to be safe in a query parameter https://www.example.com/sso-login?then=${url} ${query} is replaced with the current query string https://www.example.com/auth-proxy/oauth/authorize?${query} Required when challenge is set to true. + type: string + clientCommonNames: + description: clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative. + type: array + items: + type: string + emailHeaders: + description: emailHeaders is the set of headers to check for the email address + type: array + items: + type: string + headers: + description: headers is the set of headers to check for identity information + type: array + items: + type: string + loginURL: + description: loginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter https://www.example.com/sso-login?then=${url} ${query} is replaced with the current query string https://www.example.com/auth-proxy/oauth/authorize?${query} Required when login is set to true. 
+ type: string + nameHeaders: + description: nameHeaders is the set of headers to check for the display name + type: array + items: + type: string + preferredUsernameHeaders: + description: preferredUsernameHeaders is the set of headers to check for the preferred username + type: array + items: + type: string + type: + description: type identifies the identity provider type for this entry. + type: string + x-kubernetes-list-type: atomic + templates: + description: templates allow you to customize pages like the login page. + type: object + properties: + error: + description: error is the name of a secret that specifies a go template to use to render error pages during the authentication or grant flow. The key "errors.html" is used to locate the template data. If specified and the secret or expected key is not found, the default error page is used. If the specified template is not valid, the default error page is used. If unspecified, the default error page is used. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + login: + description: login is the name of a secret that specifies a go template to use to render the login page. The key "login.html" is used to locate the template data. If specified and the secret or expected key is not found, the default login page is used. If the specified template is not valid, the default login page is used. If unspecified, the default login page is used. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + providerSelection: + description: providerSelection is the name of a secret that specifies a go template to use to render the provider selection page. The key "providers.html" is used to locate the template data. If specified and the secret or expected key is not found, the default provider selection page is used. If the specified template is not valid, the default provider selection page is used. If unspecified, the default provider selection page is used. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + tokenConfig: + description: tokenConfig contains options for authorization and access tokens + type: object + properties: + accessTokenInactivityTimeout: + description: "accessTokenInactivityTimeout defines the token inactivity timeout for tokens granted by any client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Takes valid time duration string such as \"5m\", \"1.5h\" or \"2h45m\". The minimum allowed value for duration is 300s (5 minutes). If the timeout is configured per client, then that value takes precedence. If the timeout value is not specified and the client does not override the value, then tokens are valid until their lifetime. \n WARNING: existing tokens' timeout will not be affected (lowered) by changing this value" + type: string + accessTokenInactivityTimeoutSeconds: + description: 'accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect.' 
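The accessTokenInactivityTimeout description above relies on Go duration syntax ("5m", "1.5h", "2h45m") with a 300s floor. As a hedged illustration of that stated rule only, the sketch below checks a candidate value with time.ParseDuration; the actual enforcement lives in the components consuming this config, not in this vendored schema, and validateInactivityTimeout is a made-up name.

package main

import (
	"fmt"
	"time"
)

// validateInactivityTimeout accepts a Go-style duration string and applies
// the documented 300s (5 minute) minimum.
func validateInactivityTimeout(s string) (time.Duration, error) {
	d, err := time.ParseDuration(s)
	if err != nil {
		return 0, err
	}
	if d < 5*time.Minute {
		return 0, fmt.Errorf("timeout %s is below the 300s minimum", d)
	}
	return d, nil
}

func main() {
	fmt.Println(validateInactivityTimeout("2h45m")) // 2h45m0s <nil>
	fmt.Println(validateInactivityTimeout("4m"))    // 0s timeout 4m0s is below the 300s minimum
}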
+ type: integer + format: int32 + accessTokenMaxAgeSeconds: + description: accessTokenMaxAgeSeconds defines the maximum age of access tokens + type: integer + format: int32 + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml new file mode 100644 index 000000000..42f745c67 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml @@ -0,0 +1,55 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: projects.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Project + listKind: ProjectList + plural: projects + singular: project + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Project holds cluster-wide information about Project. The canonical name is `cluster` \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + projectRequestMessage: + description: projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint + type: string + projectRequestTemplate: + description: projectRequestTemplate is the template to use for creating projects in response to projectrequest. This must point to a template in 'openshift-config' namespace. It is optional. If it is not specified, a default template is used. + type: object + properties: + name: + description: name is the metadata.name of the referenced project request template + type: string + status: + description: status holds observed values from the cluster. They may not be overridden. 
+ type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml new file mode 100644 index 000000000..f161bc432 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml @@ -0,0 +1,68 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: schedulers.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Scheduler + listKind: SchedulerList + plural: schedulers + singular: scheduler + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Scheduler holds cluster-wide config information to run the Kubernetes Scheduler and influence its placement decisions. The canonical name for this config is `cluster`. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + defaultNodeSelector: + description: 'defaultNodeSelector helps set the cluster-wide default node selector to restrict pod placement to specific nodes. This is applied to the pods created in all namespaces and creates an intersection with any existing nodeSelectors already set on a pod, additionally constraining that pod''s selector. For example, defaultNodeSelector: "type=user-node,region=east" would set nodeSelector field in pod spec to "type=user-node,region=east" to all pods created in all namespaces. Namespaces having project-wide node selectors won''t be impacted even if this field is set. This adds an annotation section to the namespace. For example, if a new namespace is created with node-selector=''type=user-node,region=east'', the annotation openshift.io/node-selector: type=user-node,region=east gets added to the project. When the openshift.io/node-selector annotation is set on the project the value is used in preference to the value we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector: "type=user-node,region=west" means that the default of "type=user-node,region=east" set in defaultNodeSelector would not be applied.' + type: string + mastersSchedulable: + description: 'MastersSchedulable allows masters nodes to be schedulable. 
When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. Please turn on this field after doing due diligence.' + type: boolean + policy: + description: 'DEPRECATED: the scheduler Policy API has been deprecated and will be removed in a future release. policy is a reference to a ConfigMap containing scheduler policy which has user specified predicates and priorities. If this ConfigMap is not available scheduler will default to use DefaultAlgorithmProvider. The namespace for this configmap is openshift-config.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + profile: + description: "profile sets which scheduling profile should be set in order to configure scheduling decisions for new pods. \n Valid values are \"LowNodeUtilization\", \"HighNodeUtilization\", \"NoScoring\" Defaults to \"LowNodeUtilization\"" + type: string + enum: + - "" + - LowNodeUtilization + - HighNodeUtilization + - NoScoring + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/doc.go b/vendor/github.com/openshift/api/config/v1/doc.go new file mode 100644 index 000000000..4ff5208f2 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +kubebuilder:validation:Optional +// +groupName=config.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/config/v1/register.go b/vendor/github.com/openshift/api/config/v1/register.go new file mode 100644 index 000000000..284d06f9a --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/register.go @@ -0,0 +1,72 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "config.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &APIServer{}, + &APIServerList{}, + &Authentication{}, + &AuthenticationList{}, + &Build{}, + &BuildList{}, + &ClusterOperator{}, + &ClusterOperatorList{}, + &ClusterVersion{}, + &ClusterVersionList{}, + &Console{}, + &ConsoleList{}, + &DNS{}, + &DNSList{}, + &FeatureGate{}, + &FeatureGateList{}, + &Image{}, + &ImageList{}, + &Infrastructure{}, + &InfrastructureList{}, + &Ingress{}, + &IngressList{}, + &Network{}, + &NetworkList{}, + &OAuth{}, + &OAuthList{}, + &OperatorHub{}, + &OperatorHubList{}, + &Project{}, + &ProjectList{}, + &Proxy{}, + &ProxyList{}, + &Scheduler{}, + &SchedulerList{}, + &ImageContentPolicy{}, + &ImageContentPolicyList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/config/v1/stringsource.go b/vendor/github.com/openshift/api/config/v1/stringsource.go new file mode 100644 index 000000000..6a5718c1d --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/stringsource.go @@ -0,0 +1,31 @@ +package v1 + +import "encoding/json" + +// UnmarshalJSON implements the json.Unmarshaller interface. +// If the value is a string, it sets the Value field of the StringSource. +// Otherwise, it is unmarshaled into the StringSourceSpec struct +func (s *StringSource) UnmarshalJSON(value []byte) error { + // If we can unmarshal to a simple string, just set the value + var simpleValue string + if err := json.Unmarshal(value, &simpleValue); err == nil { + s.Value = simpleValue + return nil + } + + // Otherwise do the full struct unmarshal + return json.Unmarshal(value, &s.StringSourceSpec) +} + +// MarshalJSON implements the json.Marshaller interface. +// If the StringSource contains only a string Value (or is empty), it is marshaled as a JSON string. +// Otherwise, the StringSourceSpec struct is marshaled as a JSON object. +func (s *StringSource) MarshalJSON() ([]byte, error) { + // If we have only a cleartext value set, do a simple string marshal + if s.StringSourceSpec == (StringSourceSpec{Value: s.Value}) { + return json.Marshal(s.Value) + } + + // Otherwise do the full struct marshal of the externalized bits + return json.Marshal(s.StringSourceSpec) +} diff --git a/vendor/github.com/openshift/api/config/v1/types.go b/vendor/github.com/openshift/api/config/v1/types.go new file mode 100644 index 000000000..56d00648e --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types.go @@ -0,0 +1,400 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// ConfigMapFileReference references a config map in a specific namespace. +// The namespace must be specified at the point of use. +type ConfigMapFileReference struct { + Name string `json:"name"` + // Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. + Key string `json:"key,omitempty"` +} + +// ConfigMapNameReference references a config map in a specific namespace. +// The namespace must be specified at the point of use. +type ConfigMapNameReference struct { + // name is the metadata.name of the referenced config map + // +kubebuilder:validation:Required + // +required + Name string `json:"name"` +} + +// SecretNameReference references a secret in a specific namespace. +// The namespace must be specified at the point of use. 
+type SecretNameReference struct { + // name is the metadata.name of the referenced secret + // +kubebuilder:validation:Required + // +required + Name string `json:"name"` +} + +// HTTPServingInfo holds configuration for serving HTTP +type HTTPServingInfo struct { + // ServingInfo is the HTTP serving information + ServingInfo `json:",inline"` + // MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit. + MaxRequestsInFlight int64 `json:"maxRequestsInFlight"` + // RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if + // -1 there is no limit on requests. + RequestTimeoutSeconds int64 `json:"requestTimeoutSeconds"` +} + +// ServingInfo holds information about serving web pages +type ServingInfo struct { + // BindAddress is the ip:port to serve on + BindAddress string `json:"bindAddress"` + // BindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp", + // "tcp4", and "tcp6" + BindNetwork string `json:"bindNetwork"` + // CertInfo is the TLS cert info for serving secure traffic. + // this is anonymous so that we can inline it for serialization + CertInfo `json:",inline"` + // ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates + // +optional + ClientCA string `json:"clientCA,omitempty"` + // NamedCertificates is a list of certificates to use to secure requests to specific hostnames + NamedCertificates []NamedCertificate `json:"namedCertificates,omitempty"` + // MinTLSVersion is the minimum TLS version supported. + // Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants + MinTLSVersion string `json:"minTLSVersion,omitempty"` + // CipherSuites contains an overridden list of ciphers for the server to support. + // Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants + CipherSuites []string `json:"cipherSuites,omitempty"` +} + +// CertInfo relates a certificate with a private key +type CertInfo struct { + // CertFile is a file containing a PEM-encoded certificate + CertFile string `json:"certFile"` + // KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile + KeyFile string `json:"keyFile"` +} + +// NamedCertificate specifies a certificate/key, and the names it should be served for +type NamedCertificate struct { + // Names is a list of DNS names this certificate should be used to secure + // A name can be a normal DNS name, or can contain leading wildcard segments. + Names []string `json:"names,omitempty"` + // CertInfo is the TLS cert info for serving secure traffic + CertInfo `json:",inline"` +} + +// LeaderElection provides information to elect a leader +type LeaderElection struct { + // disable allows leader election to be suspended while allowing a fully defaulted "normal" startup case. + Disable bool `json:"disable,omitempty"` + // namespace indicates which namespace the resource is in + Namespace string `json:"namespace,omitempty"` + // name indicates what name to use for the resource + Name string `json:"name,omitempty"` + + // leaseDuration is the duration that non-leader candidates will wait + // after observing a leadership renewal until attempting to acquire + // leadership of a led but unrenewed leader slot. This is effectively the + // maximum duration that a leader can be stopped before it is replaced + // by another candidate. This is only applicable if leader election is + // enabled. 
+ // +nullable + LeaseDuration metav1.Duration `json:"leaseDuration"` + // renewDeadline is the interval between attempts by the acting master to + // renew a leadership slot before it stops leading. This must be less + // than or equal to the lease duration. This is only applicable if leader + // election is enabled. + // +nullable + RenewDeadline metav1.Duration `json:"renewDeadline"` + // retryPeriod is the duration the clients should wait between attempting + // acquisition and renewal of a leadership. This is only applicable if + // leader election is enabled. + // +nullable + RetryPeriod metav1.Duration `json:"retryPeriod"` +} + +// StringSource allows specifying a string inline, or externally via env var or file. +// When it contains only a string value, it marshals to a simple JSON string. +type StringSource struct { + // StringSourceSpec specifies the string value, or external location + StringSourceSpec `json:",inline"` +} + +// StringSourceSpec specifies a string value, or external location +type StringSourceSpec struct { + // Value specifies the cleartext value, or an encrypted value if keyFile is specified. + Value string `json:"value"` + + // Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified. + Env string `json:"env"` + + // File references a file containing the cleartext value, or an encrypted value if a keyFile is specified. + File string `json:"file"` + + // KeyFile references a file containing the key to use to decrypt the value. + KeyFile string `json:"keyFile"` +} + +// RemoteConnectionInfo holds information necessary for establishing a remote connection +type RemoteConnectionInfo struct { + // URL is the remote URL to connect to + URL string `json:"url"` + // CA is the CA for verifying TLS connections + CA string `json:"ca"` + // CertInfo is the TLS client cert information to present + // this is anonymous so that we can inline it for serialization + CertInfo `json:",inline"` +} + +type AdmissionConfig struct { + PluginConfig map[string]AdmissionPluginConfig `json:"pluginConfig,omitempty"` + + // enabledPlugins is a list of admission plugins that must be on in addition to the default list. + // Some admission plugins are disabled by default, but certain configurations require them. This is fairly uncommon + // and can result in performance penalties and unexpected behavior. + EnabledAdmissionPlugins []string `json:"enabledPlugins,omitempty"` + + // disabledPlugins is a list of admission plugins that must be off. Putting something in this list + // is almost always a mistake and likely to result in cluster instability. + DisabledAdmissionPlugins []string `json:"disabledPlugins,omitempty"` +} + +// AdmissionPluginConfig holds the necessary configuration options for admission plugins +type AdmissionPluginConfig struct { + // Location is the path to a configuration file that contains the plugin's + // configuration + Location string `json:"location"` + + // Configuration is an embedded configuration object to be used as the plugin's + // configuration. If present, it will be used instead of the path to the configuration file. + // +nullable + // +kubebuilder:pruning:PreserveUnknownFields + Configuration runtime.RawExtension `json:"configuration"` +} + +type LogFormatType string + +type WebHookModeType string + +const ( + // LogFormatLegacy saves event in 1-line text format. + LogFormatLegacy LogFormatType = "legacy" + // LogFormatJson saves event in structured json format. 
+ LogFormatJson LogFormatType = "json" + + // WebHookModeBatch indicates that the webhook should buffer audit events + // internally, sending batch updates either once a certain number of + // events have been received or a certain amount of time has passed. + WebHookModeBatch WebHookModeType = "batch" + // WebHookModeBlocking causes the webhook to block on every attempt to process + // a set of events. This causes requests to the API server to wait for a + // round trip to the external audit service before sending a response. + WebHookModeBlocking WebHookModeType = "blocking" +) + +// AuditConfig holds configuration for the audit capabilities +type AuditConfig struct { + // If this flag is set, audit log will be printed in the logs. + // The logs contains, method, user and a requested URL. + Enabled bool `json:"enabled"` + // All requests coming to the apiserver will be logged to this file. + AuditFilePath string `json:"auditFilePath"` + // Maximum number of days to retain old log files based on the timestamp encoded in their filename. + MaximumFileRetentionDays int32 `json:"maximumFileRetentionDays"` + // Maximum number of old log files to retain. + MaximumRetainedFiles int32 `json:"maximumRetainedFiles"` + // Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB. + MaximumFileSizeMegabytes int32 `json:"maximumFileSizeMegabytes"` + + // PolicyFile is a path to the file that defines the audit policy configuration. + PolicyFile string `json:"policyFile"` + // PolicyConfiguration is an embedded policy configuration object to be used + // as the audit policy configuration. If present, it will be used instead of + // the path to the policy file. + // +nullable + // +kubebuilder:pruning:PreserveUnknownFields + PolicyConfiguration runtime.RawExtension `json:"policyConfiguration"` + + // Format of saved audits (legacy or json). + LogFormat LogFormatType `json:"logFormat"` + + // Path to a .kubeconfig formatted file that defines the audit webhook configuration. + WebHookKubeConfig string `json:"webHookKubeConfig"` + // Strategy for sending audit events (block or batch). + WebHookMode WebHookModeType `json:"webHookMode"` +} + +// EtcdConnectionInfo holds information necessary for connecting to an etcd server +type EtcdConnectionInfo struct { + // URLs are the URLs for etcd + URLs []string `json:"urls,omitempty"` + // CA is a file containing trusted roots for the etcd server certificates + CA string `json:"ca"` + // CertInfo is the TLS client cert information for securing communication to etcd + // this is anonymous so that we can inline it for serialization + CertInfo `json:",inline"` +} + +type EtcdStorageConfig struct { + EtcdConnectionInfo `json:",inline"` + + // StoragePrefix is the path within etcd that the OpenShift resources will + // be rooted under. This value, if changed, will mean existing objects in etcd will + // no longer be located. 
+ StoragePrefix string `json:"storagePrefix"` +} + +// GenericAPIServerConfig is an inline-able struct for aggregated apiservers that need to store data in etcd +type GenericAPIServerConfig struct { + // servingInfo describes how to start serving + ServingInfo HTTPServingInfo `json:"servingInfo"` + + // corsAllowedOrigins + CORSAllowedOrigins []string `json:"corsAllowedOrigins"` + + // auditConfig describes how to configure audit information + AuditConfig AuditConfig `json:"auditConfig"` + + // storageConfig contains information about how to use + StorageConfig EtcdStorageConfig `json:"storageConfig"` + + // admissionConfig holds information about how to configure admission. + AdmissionConfig AdmissionConfig `json:"admission"` + + KubeClientConfig KubeClientConfig `json:"kubeClientConfig"` +} + +type KubeClientConfig struct { + // kubeConfig is a .kubeconfig filename for going to the owning kube-apiserver. Empty uses an in-cluster-config + KubeConfig string `json:"kubeConfig"` + + // connectionOverrides specifies client overrides for system components to loop back to this master. + ConnectionOverrides ClientConnectionOverrides `json:"connectionOverrides"` +} + +type ClientConnectionOverrides struct { + // acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the + // default value of 'application/json'. This field will control all connections to the server used by a particular + // client. + AcceptContentTypes string `json:"acceptContentTypes"` + // contentType is the content type used when sending data to the server from this client. + ContentType string `json:"contentType"` + + // qps controls the number of queries per second allowed for this connection. + QPS float32 `json:"qps"` + // burst allows extra queries to accumulate when a client is exceeding its rate. + Burst int32 `json:"burst"` +} + +// GenericControllerConfig provides information to configure a controller +type GenericControllerConfig struct { + // ServingInfo is the HTTP serving information for the controller's endpoints + ServingInfo HTTPServingInfo `json:"servingInfo"` + + // leaderElection provides information to elect a leader. Only override this if you have a specific need + LeaderElection LeaderElection `json:"leaderElection"` + + // authentication allows configuration of authentication for the endpoints + Authentication DelegatedAuthentication `json:"authentication"` + // authorization allows configuration of authentication for the endpoints + Authorization DelegatedAuthorization `json:"authorization"` +} + +// DelegatedAuthentication allows authentication to be disabled. +type DelegatedAuthentication struct { + // disabled indicates that authentication should be disabled. By default it will use delegated authentication. + Disabled bool `json:"disabled,omitempty"` +} + +// DelegatedAuthorization allows authorization to be disabled. +type DelegatedAuthorization struct { + // disabled indicates that authorization should be disabled. By default it will use delegated authorization. + Disabled bool `json:"disabled,omitempty"` +} +type RequiredHSTSPolicy struct { + // namespaceSelector specifies a label selector such that the policy applies only to those routes that + // are in namespaces with labels that match the selector, and are in one of the DomainPatterns. + // Defaults to the empty LabelSelector, which matches everything. 
+ // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"` + + // domainPatterns is a list of domains for which the desired HSTS annotations are required. + // If domainPatterns is specified and a route is created with a spec.host matching one of the domains, + // the route must specify the HSTS Policy components described in the matching RequiredHSTSPolicy. + // + // The use of wildcards is allowed like this: *.foo.com matches everything under foo.com. + // foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*. + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:Required + // +required + DomainPatterns []string `json:"domainPatterns"` + + // maxAge is the delta time range in seconds during which hosts are regarded as HSTS hosts. + // If set to 0, it negates the effect, and hosts are removed as HSTS hosts. + // If set to 0 and includeSubdomains is specified, all subdomains of the host are also removed as HSTS hosts. + // maxAge is a time-to-live value, and if this policy is not refreshed on a client, the HSTS + // policy will eventually expire on that client. + MaxAge MaxAgePolicy `json:"maxAge"` + + // preloadPolicy directs the client to include hosts in its host preload list so that + // it never needs to do an initial load to get the HSTS header (note that this is not defined + // in RFC 6797 and is therefore client implementation-dependent). + // +optional + PreloadPolicy PreloadPolicy `json:"preloadPolicy,omitempty"` + + // includeSubDomainsPolicy means the HSTS Policy should apply to any subdomains of the host's + // domain name. Thus, for the host bar.foo.com, if includeSubDomainsPolicy was set to RequireIncludeSubDomains: + // - the host app.bar.foo.com would inherit the HSTS Policy of bar.foo.com + // - the host bar.foo.com would inherit the HSTS Policy of bar.foo.com + // - the host foo.com would NOT inherit the HSTS Policy of bar.foo.com + // - the host def.foo.com would NOT inherit the HSTS Policy of bar.foo.com + // +optional + IncludeSubDomainsPolicy IncludeSubDomainsPolicy `json:"includeSubDomainsPolicy,omitempty"` +} + +// MaxAgePolicy contains a numeric range for specifying a compliant HSTS max-age for the enclosing RequiredHSTSPolicy +type MaxAgePolicy struct { + // The largest allowed value (in seconds) of the RequiredHSTSPolicy max-age + // This value can be left unspecified, in which case no upper limit is enforced. + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=2147483647 + LargestMaxAge *int32 `json:"largestMaxAge,omitempty"` + + // The smallest allowed value (in seconds) of the RequiredHSTSPolicy max-age + // Setting max-age=0 allows the deletion of an existing HSTS header from a host. This is a necessary + // tool for administrators to quickly correct mistakes. + // This value can be left unspecified, in which case no lower limit is enforced. 
+ // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=2147483647 + SmallestMaxAge *int32 `json:"smallestMaxAge,omitempty"` +} + +// PreloadPolicy contains a value for specifying a compliant HSTS preload policy for the enclosing RequiredHSTSPolicy +// +kubebuilder:validation:Enum=RequirePreload;RequireNoPreload;NoOpinion +type PreloadPolicy string + +const ( + // RequirePreloadPolicy means HSTS "preload" is required by the RequiredHSTSPolicy + RequirePreloadPolicy PreloadPolicy = "RequirePreload" + + // RequireNoPreloadPolicy means HSTS "preload" is forbidden by the RequiredHSTSPolicy + RequireNoPreloadPolicy PreloadPolicy = "RequireNoPreload" + + // NoOpinionPreloadPolicy means HSTS "preload" doesn't matter to the RequiredHSTSPolicy + NoOpinionPreloadPolicy PreloadPolicy = "NoOpinion" +) + +// IncludeSubDomainsPolicy contains a value for specifying a compliant HSTS includeSubdomains policy +// for the enclosing RequiredHSTSPolicy +// +kubebuilder:validation:Enum=RequireIncludeSubDomains;RequireNoIncludeSubDomains;NoOpinion +type IncludeSubDomainsPolicy string + +const ( + // RequireIncludeSubDomains means HSTS "includeSubDomains" is required by the RequiredHSTSPolicy + RequireIncludeSubDomains IncludeSubDomainsPolicy = "RequireIncludeSubDomains" + + // RequireNoIncludeSubDomains means HSTS "includeSubDomains" is forbidden by the RequiredHSTSPolicy + RequireNoIncludeSubDomains IncludeSubDomainsPolicy = "RequireNoIncludeSubDomains" + + // NoOpinionIncludeSubDomains means HSTS "includeSubDomains" doesn't matter to the RequiredHSTSPolicy + NoOpinionIncludeSubDomains IncludeSubDomainsPolicy = "NoOpinion" +) diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go new file mode 100644 index 000000000..31801aacf --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go @@ -0,0 +1,211 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// APIServer holds configuration (like serving certificates, client CA and CORS domains) +// shared by all API servers in the system, among them especially kube-apiserver +// and openshift-apiserver. The canonical name of an instance is 'cluster'. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type APIServer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec APIServerSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status APIServerStatus `json:"status"` +} + +type APIServerSpec struct { + // servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates + // will be used for serving secure traffic. + // +optional + ServingCerts APIServerServingCerts `json:"servingCerts"` + // clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for + // incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. 
+ // You usually only have to set this if you have your own PKI you wish to honor client certificates from. + // The ConfigMap must exist in the openshift-config namespace and contain the following required fields: + // - ConfigMap.Data["ca-bundle.crt"] - CA bundle. + // +optional + ClientCA ConfigMapNameReference `json:"clientCA"` + // additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the + // API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth + // server from JavaScript applications. + // The values are regular expressions that correspond to the Golang regular expression language. + // +optional + AdditionalCORSAllowedOrigins []string `json:"additionalCORSAllowedOrigins,omitempty"` + // encryption allows the configuration of encryption of resources at the datastore layer. + // +optional + Encryption APIServerEncryption `json:"encryption"` + // tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. + // + // If unset, a default (which may change between releases) is chosen. Note that only Old, + // Intermediate and Custom profiles are currently supported, and the maximum available + // MinTLSVersions is VersionTLS12. + // +optional + TLSSecurityProfile *TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` + // audit specifies the settings for audit configuration to be applied to all OpenShift-provided + // API servers in the cluster. + // +optional + // +kubebuilder:default={profile: Default} + Audit Audit `json:"audit"` +} + +// AuditProfileType defines the audit policy profile type. +// +kubebuilder:validation:Enum=Default;WriteRequestBodies;AllRequestBodies;None +type AuditProfileType string + +const ( + // "None" disables audit logs. + NoneAuditProfileType AuditProfileType = "None" + + // "Default" is the existing default audit configuration policy. + DefaultAuditProfileType AuditProfileType = "Default" + + // "WriteRequestBodies" is similar to Default but it logs request and response + // HTTP payloads for write requests (create, update, patch) + WriteRequestBodiesAuditProfileType AuditProfileType = "WriteRequestBodies" + + // "AllRequestBodies" is similar to WriteRequestBodies, but also logs request + // and response HTTP payloads for read requests (get, list). + AllRequestBodiesAuditProfileType AuditProfileType = "AllRequestBodies" +) + +type Audit struct { + // profile specifies the name of the desired top-level audit profile to be applied to all requests + // sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver, + // openshift-apiserver and oauth-apiserver), with the exception of those requests that match + // one or more of the customRules. + // + // The following profiles are provided: + // - Default: default policy which means MetaData level logging with the exception of events + // (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody + // level). + // - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for + // write requests (create, update, patch). + // - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response + // HTTP payloads for read requests (get, list). + // - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. 
+ // + // Warning: It is not recommended to disable audit logging by using the `None` profile unless you + // are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues. + // If you disable audit logging and a support situation arises, you might need to enable audit logging + // and reproduce the issue in order to troubleshoot properly. + // + // If unset, the 'Default' profile is used as the default. + // + // +kubebuilder:default=Default + Profile AuditProfileType `json:"profile,omitempty"` + // customRules specify profiles per group. These profile take precedence over the + // top-level profile field if they apply. They are evaluation from top to bottom and + // the first one that matches, applies. + // +listType=map + // +listMapKey=group + // +optional + CustomRules []AuditCustomRule `json:"customRules,omitempty"` +} + +// AuditCustomRule describes a custom rule for an audit profile that takes precedence over +// the top-level profile. +type AuditCustomRule struct { + // group is a name of group a request user must be member of in order to this profile to apply. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +required + Group string `json:"group"` + // profile specifies the name of the desired audit policy configuration to be deployed to + // all OpenShift-provided API servers in the cluster. + // + // The following profiles are provided: + // - Default: the existing default policy. + // - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for + // write requests (create, update, patch). + // - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response + // HTTP payloads for read requests (get, list). + // - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. + // + // If unset, the 'Default' profile is used as the default. + // + // +kubebuilder:validation:Required + // +required + Profile AuditProfileType `json:"profile,omitempty"` +} + +type APIServerServingCerts struct { + // namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames. + // If no named certificates are provided, or no named certificates match the server name as understood by a client, + // the defaultServingCertificate will be used. + // +optional + NamedCertificates []APIServerNamedServingCert `json:"namedCertificates,omitempty"` +} + +// APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate. +type APIServerNamedServingCert struct { + // names is a optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to + // serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates. + // Exact names trump over wildcard names. Explicit names defined here trump over extracted implicit names. + // +optional + Names []string `json:"names,omitempty"` + // servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic. + // The secret must exist in the openshift-config namespace and contain the following required fields: + // - Secret.Data["tls.key"] - TLS private key. + // - Secret.Data["tls.crt"] - TLS certificate. + ServingCertificate SecretNameReference `json:"servingCertificate"` +} + +type APIServerEncryption struct { + // type defines what encryption type should be used to encrypt resources at the datastore layer. 
+ // When this field is unset (i.e. when it is set to the empty string), identity is implied. + // The behavior of unset can and will change over time. Even if encryption is enabled by default, + // the meaning of unset may change to a different encryption type based on changes in best practices. + // + // When encryption is enabled, all sensitive resources shipped with the platform are encrypted. + // This list of sensitive resources can and will change over time. The current authoritative list is: + // + // 1. secrets + // 2. configmaps + // 3. routes.route.openshift.io + // 4. oauthaccesstokens.oauth.openshift.io + // 5. oauthauthorizetokens.oauth.openshift.io + // + // +unionDiscriminator + // +optional + Type EncryptionType `json:"type,omitempty"` +} + +// +kubebuilder:validation:Enum="";identity;aescbc +type EncryptionType string + +const ( + // identity refers to a type where no encryption is performed at the datastore layer. + // Resources are written as-is without encryption. + EncryptionTypeIdentity EncryptionType = "identity" + + // aescbc refers to a type where AES-CBC with PKCS#7 padding and a 32-byte key + // is used to perform encryption at the datastore layer. + EncryptionTypeAESCBC EncryptionType = "aescbc" +) + +type APIServerStatus struct { +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type APIServerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []APIServer `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go new file mode 100644 index 000000000..7f346069e --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go @@ -0,0 +1,156 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Authentication specifies cluster-wide settings for authentication (like OAuth and +// webhook token authenticators). The canonical name of an instance is `cluster`. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Authentication struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec AuthenticationSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status AuthenticationStatus `json:"status"` +} + +type AuthenticationSpec struct { + // type identifies the cluster managed, user facing authentication mode in use. + // Specifically, it manages the component that responds to login attempts. + // The default is IntegratedOAuth. + // +optional + Type AuthenticationType `json:"type"` + + // oauthMetadata contains the discovery endpoint data for OAuth 2.0 + // Authorization Server Metadata for an external OAuth server. 
+ // This discovery document can be viewed from its served location: + // oc get --raw '/.well-known/oauth-authorization-server' + // For further details, see the IETF Draft: + // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + // If oauthMetadata.name is non-empty, this value has precedence + // over any metadata reference stored in status. + // The key "oauthMetadata" is used to locate the data. + // If specified and the config map or expected key is not found, no metadata is served. + // If the specified metadata is not valid, no metadata is served. + // The namespace for this config map is openshift-config. + // +optional + OAuthMetadata ConfigMapNameReference `json:"oauthMetadata"` + + // webhookTokenAuthenticators is DEPRECATED, setting it has no effect. + WebhookTokenAuthenticators []DeprecatedWebhookTokenAuthenticator `json:"webhookTokenAuthenticators,omitempty"` + + // webhookTokenAuthenticator configures a remote token reviewer. + // These remote authentication webhooks can be used to verify bearer tokens + // via the tokenreviews.authentication.k8s.io REST API. This is required to + // honor bearer tokens that are provisioned by an external authentication service. + // +optional + WebhookTokenAuthenticator *WebhookTokenAuthenticator `json:"webhookTokenAuthenticator,omitempty"` + + // serviceAccountIssuer is the identifier of the bound service account token + // issuer. + // The default is https://kubernetes.default.svc + // WARNING: Updating this field will result in the invalidation of + // all bound tokens with the previous issuer value. Unless the + // holder of a bound token has explicit support for a change in + // issuer, they will not request a new bound token until pod + // restart or until their existing token exceeds 80% of its + // duration. + // +optional + ServiceAccountIssuer string `json:"serviceAccountIssuer"` +} + +type AuthenticationStatus struct { + // integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 + // Authorization Server Metadata for the in-cluster integrated OAuth server. + // This discovery document can be viewed from its served location: + // oc get --raw '/.well-known/oauth-authorization-server' + // For further details, see the IETF Draft: + // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + // This contains the observed value based on cluster state. + // An explicitly set value in spec.oauthMetadata has precedence over this field. + // This field has no meaning if authentication spec.type is not set to IntegratedOAuth. + // The key "oauthMetadata" is used to locate the data. + // If the config map or expected key is not found, no metadata is served. + // If the specified metadata is not valid, no metadata is served. + // The namespace for this config map is openshift-config-managed. + IntegratedOAuthMetadata ConfigMapNameReference `json:"integratedOAuthMetadata"` + + // TODO if we add support for an in-cluster operator managed Keycloak instance + // KeycloakOAuthMetadata ConfigMapNameReference `json:"keycloakOAuthMetadata"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 +type AuthenticationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Authentication `json:"items"` +} + +type AuthenticationType string + +const ( + // None means that no cluster managed authentication system is in place. + // Note that user login will only work if a manually configured system is in place and + // referenced in authentication spec via oauthMetadata and webhookTokenAuthenticators. + AuthenticationTypeNone AuthenticationType = "None" + + // IntegratedOAuth refers to the cluster managed OAuth server. + // It is configured via the top level OAuth config. + AuthenticationTypeIntegratedOAuth AuthenticationType = "IntegratedOAuth" + + // TODO if we add support for an in-cluster operator managed Keycloak instance + // AuthenticationTypeKeycloak AuthenticationType = "Keycloak" +) + +// deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. +// It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field. +type DeprecatedWebhookTokenAuthenticator struct { + // kubeConfig contains kube config file data which describes how to access the remote webhook service. + // For further details, see: + // https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication + // The key "kubeConfig" is used to locate the data. + // If the secret or expected key is not found, the webhook is not honored. + // If the specified kube config data is not valid, the webhook is not honored. + // The namespace for this secret is determined by the point of use. + KubeConfig SecretNameReference `json:"kubeConfig"` +} + +// webhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator +type WebhookTokenAuthenticator struct { + // kubeConfig references a secret that contains kube config file data which + // describes how to access the remote webhook service. + // The namespace for the referenced secret is openshift-config. + // + // For further details, see: + // + // https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication + // + // The key "kubeConfig" is used to locate the data. + // If the secret or expected key is not found, the webhook is not honored. + // If the specified kube config data is not valid, the webhook is not honored. + // +kubebuilder:validation:Required + // +required + KubeConfig SecretNameReference `json:"kubeConfig"` +} + +const ( + // OAuthMetadataKey is the key for the oauth authorization server metadata + OAuthMetadataKey = "oauthMetadata" + + // KubeConfigKey is the key for the kube config file data in a secret + KubeConfigKey = "kubeConfig" +) diff --git a/vendor/github.com/openshift/api/config/v1/types_build.go b/vendor/github.com/openshift/api/config/v1/types_build.go new file mode 100644 index 000000000..34f46a1f9 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_build.go @@ -0,0 +1,121 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Build configures the behavior of OpenShift builds for the entire cluster. +// This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds. 
+// +// The canonical name is "cluster" +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Build struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec holds user-settable values for the build controller configuration + // +kubebuilder:validation:Required + // +required + Spec BuildSpec `json:"spec"` +} + +type BuildSpec struct { + // AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that + // should be trusted for image pushes and pulls during builds. + // The namespace for this config map is openshift-config. + // + // DEPRECATED: Additional CAs for image pull and push should be set on + // image.config.openshift.io/cluster instead. + // + // +optional + AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"` + // BuildDefaults controls the default information for Builds + // +optional + BuildDefaults BuildDefaults `json:"buildDefaults"` + // BuildOverrides controls override settings for builds + // +optional + BuildOverrides BuildOverrides `json:"buildOverrides"` +} + +type BuildDefaults struct { + // DefaultProxy contains the default proxy settings for all build operations, including image pull/push + // and source download. + // + // Values can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables + // in the build config's strategy. + // +optional + DefaultProxy *ProxySpec `json:"defaultProxy,omitempty"` + + // GitProxy contains the proxy settings for git operations only. If set, this will override + // any Proxy settings for all git commands, such as git clone. + // + // Values that are not set here will be inherited from DefaultProxy. + // +optional + GitProxy *ProxySpec `json:"gitProxy,omitempty"` + + // Env is a set of default environment variables that will be applied to the + // build if the specified variables do not exist on the build + // +optional + Env []corev1.EnvVar `json:"env,omitempty"` + + // ImageLabels is a list of docker labels that are applied to the resulting image. + // User can override a default label by providing a label with the same name in their + // Build/BuildConfig. + // +optional + ImageLabels []ImageLabel `json:"imageLabels,omitempty"` + + // Resources defines resource requirements to execute the build. + // +optional + Resources corev1.ResourceRequirements `json:"resources"` +} + +type ImageLabel struct { + // Name defines the name of the label. It must have non-zero length. + Name string `json:"name"` + + // Value defines the literal value of the label. + // +optional + Value string `json:"value,omitempty"` +} + +type BuildOverrides struct { + // ImageLabels is a list of docker labels that are applied to the resulting image. + // If user provided a label in their Build/BuildConfig with the same name as one in this + // list, the user's label will be overwritten. + // +optional + ImageLabels []ImageLabel `json:"imageLabels,omitempty"` + + // NodeSelector is a selector which must be true for the build pod to fit on a node + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // Tolerations is a list of Tolerations that will override any existing + // tolerations set on a build pod. + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + + // ForcePull overrides, if set, the equivalent value in the builds, + // i.e. 
false disables force pull for all builds, + // true enables force pull for all builds, + // independently of what each build specifies itself + // +optional + ForcePull *bool `json:"forcePull,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type BuildList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Build `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go new file mode 100644 index 000000000..bbe359679 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go @@ -0,0 +1,203 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterOperator is the Custom Resource object which holds the current state +// of an operator. This object is used by operators to convey their state to +// the rest of the cluster. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ClusterOperator struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // spec holds configuration that could apply to any operator. + // +kubebuilder:validation:Required + // +required + Spec ClusterOperatorSpec `json:"spec"` + + // status holds the information about the state of an operator. It is consistent with status information across + // the Kubernetes ecosystem. + // +optional + Status ClusterOperatorStatus `json:"status"` +} + +// ClusterOperatorSpec is empty for now, but you could imagine holding information like "pause". +type ClusterOperatorSpec struct { +} + +// ClusterOperatorStatus provides information about the status of the operator. +// +k8s:deepcopy-gen=true +type ClusterOperatorStatus struct { + // conditions describes the state of the operator's managed and monitored components. + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + + // versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple + // operand entries in the array. Available operators must report the version of the operator itself with the name "operator". + // An operator reports a new "operator" version when it has rolled out the new version to all of its operands. + // +optional + Versions []OperandVersion `json:"versions,omitempty"` + + // relatedObjects is a list of objects that are "interesting" or related to this operator. Common uses are: + // 1. the detailed resource driving the operator + // 2. operator namespaces + // 3. operand namespaces + // +optional + RelatedObjects []ObjectReference `json:"relatedObjects,omitempty"` + + // extension contains any additional status information specific to the + // operator which owns this status object. 
+ // +nullable + // +optional + // +kubebuilder:pruning:PreserveUnknownFields + Extension runtime.RawExtension `json:"extension"` +} + +type OperandVersion struct { + // name is the name of the particular operand this version is for. It usually matches container images, not operators. + // +kubebuilder:validation:Required + // +required + Name string `json:"name"` + + // version indicates which version of a particular operand is currently being managed. It must always match the Available + // operand. If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to rollout + // 1.1.0 + // +kubebuilder:validation:Required + // +required + Version string `json:"version"` +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +type ObjectReference struct { + // group of the referent. + // +kubebuilder:validation:Required + // +required + Group string `json:"group"` + // resource of the referent. + // +kubebuilder:validation:Required + // +required + Resource string `json:"resource"` + // namespace of the referent. + // +optional + Namespace string `json:"namespace,omitempty"` + // name of the referent. + // +kubebuilder:validation:Required + // +required + Name string `json:"name"` +} + +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// ClusterOperatorStatusCondition represents the state of the operator's +// managed and monitored components. +// +k8s:deepcopy-gen=true +type ClusterOperatorStatusCondition struct { + // type specifies the aspect reported by this condition. + // +kubebuilder:validation:Required + // +required + Type ClusterStatusConditionType `json:"type"` + + // status of the condition, one of True, False, Unknown. + // +kubebuilder:validation:Required + // +required + Status ConditionStatus `json:"status"` + + // lastTransitionTime is the time of the last update to the current status property. + // +kubebuilder:validation:Required + // +required + LastTransitionTime metav1.Time `json:"lastTransitionTime"` + + // reason is the CamelCase reason for the condition's current status. + // +optional + Reason string `json:"reason,omitempty"` + + // message provides additional information about the current condition. + // This is only to be consumed by humans. It may contain Line Feed + // characters (U+000A), which should be rendered as new lines. + // +optional + Message string `json:"message,omitempty"` +} + +// ClusterStatusConditionType is an aspect of operator state. +type ClusterStatusConditionType string + +const ( + // Available indicates that the component (operator and all configured operands) + // is functional and available in the cluster. Available=False means at least + // part of the component is non-functional, and that the condition requires + // immediate administrator intervention. 
+ OperatorAvailable ClusterStatusConditionType = "Available" + + // Progressing indicates that the component (operator and all configured operands) + // is actively rolling out new code, propagating config changes, or otherwise + // moving from one steady state to another. Operators should not report + // progressing when they are reconciling (without action) a previously known + // state. If the observed cluster state has changed and the component is + // reacting to it (scaling up for instance), Progressing should become true + // since it is moving from one steady state to another. + OperatorProgressing ClusterStatusConditionType = "Progressing" + + // Degraded indicates that the component (operator and all configured operands) + // does not match its desired state over a period of time resulting in a lower + // quality of service. The period of time may vary by component, but a Degraded + // state represents persistent observation of a condition. As a result, a + // component should not oscillate in and out of Degraded state. A component may + // be Available even if its degraded. For example, a component may desire 3 + // running pods, but 1 pod is crash-looping. The component is Available but + // Degraded because it may have a lower quality of service. A component may be + // Progressing but not Degraded because the transition from one state to + // another does not persist over a long enough period to report Degraded. A + // component should not report Degraded during the course of a normal upgrade. + // A component may report Degraded in response to a persistent infrastructure + // failure that requires eventual administrator intervention. For example, if + // a control plane host is unhealthy and must be replaced. A component should + // report Degraded if unexpected errors occur over a period, but the + // expectation is that all unexpected errors are handled as operators mature. + OperatorDegraded ClusterStatusConditionType = "Degraded" + + // Upgradeable indicates whether the component (operator and all configured + // operands) is safe to upgrade based on the current cluster state. When + // Upgradeable is False, the cluster-version operator will prevent the + // cluster from performing impacted updates unless forced. When set on + // ClusterVersion, the message will explain which updates (minor or patch) + // are impacted. When set on ClusterOperator, False will block minor + // OpenShift updates. The message field should contain a human readable + // description of what the administrator should do to allow the cluster or + // component to successfully update. The cluster-version operator will + // allow updates when this condition is not False, including when it is + // missing, True, or Unknown. + OperatorUpgradeable ClusterStatusConditionType = "Upgradeable" +) + +// ClusterOperatorList is a list of OperatorStatus resources. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +openshift:compatibility-gen:level=1 +type ClusterOperatorList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []ClusterOperator `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go new file mode 100644 index 000000000..44e867778 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go @@ -0,0 +1,429 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterVersion is the configuration for the ClusterVersionOperator. This is where +// parameters related to automatic updates can be set. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ClusterVersion struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec is the desired state of the cluster version - the operator will work + // to ensure that the desired version is applied to the cluster. + // +kubebuilder:validation:Required + // +required + Spec ClusterVersionSpec `json:"spec"` + // status contains information about the available updates and any in-progress + // updates. + // +optional + Status ClusterVersionStatus `json:"status"` +} + +// ClusterVersionSpec is the desired version state of the cluster. It includes +// the version the cluster should be at, how the cluster is identified, and +// where the cluster should look for version updates. +// +k8s:deepcopy-gen=true +type ClusterVersionSpec struct { + // clusterID uniquely identifies this cluster. This is expected to be + // an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in + // hexadecimal values). This is a required field. + // +kubebuilder:validation:Required + // +required + ClusterID ClusterID `json:"clusterID"` + + // desiredUpdate is an optional field that indicates the desired value of + // the cluster version. Setting this value will trigger an upgrade (if + // the current version does not match the desired version). The set of + // recommended update values is listed as part of available updates in + // status, and setting values outside that range may cause the upgrade + // to fail. You may specify the version field without setting image if + // an update exists with that version in the availableUpdates or history. + // + // If an upgrade fails the operator will halt and report status + // about the failing component. Setting the desired update value back to + // the previous version will cause a rollback to be attempted. Not all + // rollbacks will succeed. + // + // +optional + DesiredUpdate *Update `json:"desiredUpdate,omitempty"` + + // upstream may be used to specify the preferred update server. By default + // it will use the appropriate update server for the cluster and region. + // + // +optional + Upstream URL `json:"upstream,omitempty"` + // channel is an identifier for explicitly requesting that a non-default + // set of updates be applied to this cluster. The default channel will be + // contain stable updates that are appropriate for production clusters. 
+ // + // +optional + Channel string `json:"channel,omitempty"` + + // overrides is list of overides for components that are managed by + // cluster version operator. Marking a component unmanaged will prevent + // the operator from creating or updating the object. + // +optional + Overrides []ComponentOverride `json:"overrides,omitempty"` +} + +// ClusterVersionStatus reports the status of the cluster versioning, +// including any upgrades that are in progress. The current field will +// be set to whichever version the cluster is reconciling to, and the +// conditions array will report whether the update succeeded, is in +// progress, or is failing. +// +k8s:deepcopy-gen=true +type ClusterVersionStatus struct { + // desired is the version that the cluster is reconciling towards. + // If the cluster is not yet fully initialized desired will be set + // with the information available, which may be an image or a tag. + // +kubebuilder:validation:Required + // +required + Desired Release `json:"desired"` + + // history contains a list of the most recent versions applied to the cluster. + // This value may be empty during cluster startup, and then will be updated + // when a new update is being applied. The newest update is first in the + // list and it is ordered by recency. Updates in the history have state + // Completed if the rollout completed - if an update was failing or halfway + // applied the state will be Partial. Only a limited amount of update history + // is preserved. + // +optional + History []UpdateHistory `json:"history,omitempty"` + + // observedGeneration reports which version of the spec is being synced. + // If this value is not equal to metadata.generation, then the desired + // and conditions fields may represent a previous version. + // +kubebuilder:validation:Required + // +required + ObservedGeneration int64 `json:"observedGeneration"` + + // versionHash is a fingerprint of the content that the cluster will be + // updated with. It is used by the operator to avoid unnecessary work + // and is for internal use only. + // +kubebuilder:validation:Required + // +required + VersionHash string `json:"versionHash"` + + // conditions provides information about the cluster version. The condition + // "Available" is set to true if the desiredUpdate has been reached. The + // condition "Progressing" is set to true if an update is being applied. + // The condition "Degraded" is set to true if an update is currently blocked + // by a temporary or permanent error. Conditions are only valid for the + // current desiredUpdate when metadata.generation is equal to + // status.generation. + // +optional + Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty"` + + // availableUpdates contains updates recommended for this + // cluster. Updates which appear in conditionalUpdates but not in + // availableUpdates may expose this cluster to known issues. This list + // may be empty if no updates are recommended, if the update service + // is unavailable, or if an invalid channel has been specified. + // +nullable + // +kubebuilder:validation:Required + // +required + AvailableUpdates []Release `json:"availableUpdates"` + + // conditionalUpdates contains the list of updates that may be + // recommended for this cluster if it meets specific required + // conditions. Consumers interested in the set of updates that are + // actually recommended for this cluster should use + // availableUpdates. 
This list may be empty if no updates are + // recommended, if the update service is unavailable, or if an empty + // or invalid channel has been specified. + // +listType=atomic + // +optional + ConditionalUpdates []ConditionalUpdate `json:"conditionalUpdates,omitempty"` +} + +// UpdateState is a constant representing whether an update was successfully +// applied to the cluster or not. +type UpdateState string + +const ( + // CompletedUpdate indicates an update was successfully applied + // to the cluster (all resource updates were successful). + CompletedUpdate UpdateState = "Completed" + // PartialUpdate indicates an update was never completely applied + // or is currently being applied. + PartialUpdate UpdateState = "Partial" +) + +// UpdateHistory is a single attempted update to the cluster. +type UpdateHistory struct { + // state reflects whether the update was fully applied. The Partial state + // indicates the update is not fully applied, while the Completed state + // indicates the update was successfully rolled out at least once (all + // parts of the update successfully applied). + // +kubebuilder:validation:Required + // +required + State UpdateState `json:"state"` + + // startedTime is the time at which the update was started. + // +kubebuilder:validation:Required + // +required + StartedTime metav1.Time `json:"startedTime"` + + // completionTime, if set, is when the update was fully applied. The update + // that is currently being applied will have a null completion time. + // Completion time will always be set for entries that are not the current + // update (usually to the started time of the next update). + // +kubebuilder:validation:Required + // +required + // +nullable + CompletionTime *metav1.Time `json:"completionTime"` + + // version is a semantic versioning identifying the update version. If the + // requested image does not define a version, or if a failure occurs + // retrieving the image, this value may be empty. + // + // +optional + Version string `json:"version"` + + // image is a container image location that contains the update. This value + // is always populated. + // +kubebuilder:validation:Required + // +required + Image string `json:"image"` + + // verified indicates whether the provided update was properly verified + // before it was installed. If this is false the cluster may not be trusted. + // Verified does not cover upgradeable checks that depend on the cluster + // state at the time when the update target was accepted. + // +kubebuilder:validation:Required + // +required + Verified bool `json:"verified"` + + // acceptedRisks records risks which were accepted to initiate the update. + // For example, it may menition an Upgradeable=False or missing signature + // that was overriden via desiredUpdate.force, or an update that was + // initiated despite not being in the availableUpdates set of recommended + // update targets. + // +optional + AcceptedRisks string `json:"acceptedRisks,omitempty"` +} + +// ClusterID is string RFC4122 uuid. +type ClusterID string + +// ComponentOverride allows overriding cluster version operator's behavior +// for a component. +// +k8s:deepcopy-gen=true +type ComponentOverride struct { + // kind indentifies which object to override. + // +kubebuilder:validation:Required + // +required + Kind string `json:"kind"` + // group identifies the API group that the kind is in. + // +kubebuilder:validation:Required + // +required + Group string `json:"group"` + + // namespace is the component's namespace. 
If the resource is cluster + // scoped, the namespace should be empty. + // +kubebuilder:validation:Required + // +required + Namespace string `json:"namespace"` + // name is the component's name. + // +kubebuilder:validation:Required + // +required + Name string `json:"name"` + + // unmanaged controls if cluster version operator should stop managing the + // resources in this cluster. + // Default: false + // +kubebuilder:validation:Required + // +required + Unmanaged bool `json:"unmanaged"` +} + +// URL is a thin wrapper around string that ensures the string is a valid URL. +type URL string + +// Update represents an administrator update request. +// +k8s:deepcopy-gen=true +type Update struct { + // version is a semantic versioning identifying the update version. When this + // field is part of spec, version is optional if image is specified. + // + // +optional + Version string `json:"version"` + + // image is a container image location that contains the update. When this + // field is part of spec, image is optional if version is specified and the + // availableUpdates field contains a matching version. + // + // +optional + Image string `json:"image"` + + // force allows an administrator to update to an image that has failed + // verification or upgradeable checks. This option should only + // be used when the authenticity of the provided image has been verified out + // of band because the provided image will run with full administrative access + // to the cluster. Do not use this flag with images that comes from unknown + // or potentially malicious sources. + // + // +optional + Force bool `json:"force"` +} + +// Release represents an OpenShift release image and associated metadata. +// +k8s:deepcopy-gen=true +type Release struct { + // version is a semantic versioning identifying the update version. When this + // field is part of spec, version is optional if image is specified. + // +required + Version string `json:"version"` + + // image is a container image location that contains the update. When this + // field is part of spec, image is optional if version is specified and the + // availableUpdates field contains a matching version. + // +required + Image string `json:"image"` + + // url contains information about this release. This URL is set by + // the 'url' metadata property on a release or the metadata returned by + // the update API and should be displayed as a link in user + // interfaces. The URL field may not be set for test or nightly + // releases. + // +optional + URL URL `json:"url,omitempty"` + + // channels is the set of Cincinnati channels to which the release + // currently belongs. + // +optional + Channels []string `json:"channels,omitempty"` +} + +// RetrievedUpdates reports whether available updates have been retrieved from +// the upstream update server. The condition is Unknown before retrieval, False +// if the updates could not be retrieved or recently failed, or True if the +// availableUpdates field is accurate and recent. +const RetrievedUpdates ClusterStatusConditionType = "RetrievedUpdates" + +// ConditionalUpdate represents an update which is recommended to some +// clusters on the version the current cluster is reconciling, but which +// may not be recommended for the current cluster. +type ConditionalUpdate struct { + // release is the target of the update. + // +kubebuilder:validation:Required + // +required + Release Release `json:"release"` + + // risks represents the range of issues associated with + // updating to the target release. 
The cluster-version + // operator will evaluate all entries, and only recommend the + // update if there is at least one entry and all entries + // recommend the update. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +required + Risks []ConditionalUpdateRisk `json:"risks" patchStrategy:"merge" patchMergeKey:"name"` + + // conditions represents the observations of the conditional update's + // current status. Known types are: + // * Evaluating, for whether the cluster-version operator will attempt to evaluate any risks[].matchingRules. + // * Recommended, for whether the update is recommended for the current cluster. + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +// ConditionalUpdateRisk represents a reason and cluster-state +// for not recommending a conditional update. +// +k8s:deepcopy-gen=true +type ConditionalUpdateRisk struct { + // url contains information about this risk. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Format=uri + // +kubebuilder:validation:MinLength=1 + // +required + URL string `json:"url"` + + // name is the CamelCase reason for not recommending a + // conditional update, in the event that matchingRules match the + // cluster state. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +required + Name string `json:"name"` + + // message provides additional information about the risk of + // updating, in the event that matchingRules match the cluster + // state. This is only to be consumed by humans. It may + // contain Line Feed characters (U+000A), which should be + // rendered as new lines. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +required + Message string `json:"message"` + + // matchingRules is a slice of conditions for deciding which + // clusters match the risk and which do not. The slice is + // ordered by decreasing precedence. The cluster-version + // operator will walk the slice in order, and stop after the + // first it can successfully evaluate. If no condition can be + // successfully evaluated, the update will not be recommended. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + // +listType=atomic + // +required + MatchingRules []ClusterCondition `json:"matchingRules"` +} + +// ClusterCondition is a union of typed cluster conditions. The 'type' +// property determines which of the type-specific properties are relevant. +// When evaluated on a cluster, the condition may match, not match, or +// fail to evaluate. +// +k8s:deepcopy-gen=true +type ClusterCondition struct { + // type represents the cluster-condition type. This defines + // the members and semantics of any additional properties. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum={"Always","PromQL"} + // +required + Type string `json:"type"` + + // promQL represents a cluster condition based on PromQL. + // +optional + PromQL *PromQLClusterCondition `json:"promql,omitempty"` +} + +// PromQLClusterCondition represents a cluster condition based on PromQL. +type PromQLClusterCondition struct { + // PromQL is a PromQL query classifying clusters. 
This query + // query should return a 1 in the match case and a 0 in the + // does-not-match case. Queries which return no time + // series, or which return values besides 0 or 1, are + // evaluation failures. + // +kubebuilder:validation:Required + // +required + PromQL string `json:"promql"` +} + +// ClusterVersionList is a list of ClusterVersion resources. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +openshift:compatibility-gen:level=1 +type ClusterVersionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []ClusterVersion `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_console.go b/vendor/github.com/openshift/api/config/v1/types_console.go new file mode 100644 index 000000000..e1a128827 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_console.go @@ -0,0 +1,69 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Console holds cluster-wide configuration for the web console, including the +// logout URL, and reports the public URL of the console. The canonical name is +// `cluster`. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Console struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec ConsoleSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status ConsoleStatus `json:"status"` +} + +// ConsoleSpec is the specification of the desired behavior of the Console. +type ConsoleSpec struct { + // +optional + Authentication ConsoleAuthentication `json:"authentication"` +} + +// ConsoleStatus defines the observed status of the Console. +type ConsoleStatus struct { + // The URL for the console. This will be derived from the host for the route that + // is created for the console. + ConsoleURL string `json:"consoleURL"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ConsoleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Console `json:"items"` +} + +// ConsoleAuthentication defines a list of optional configuration for console authentication. +type ConsoleAuthentication struct { + // An optional, absolute URL to redirect web browsers to after logging out of + // the console. If not specified, it will redirect to the default login page. + // This is required when using an identity provider that supports single + // sign-on (SSO) such as: + // - OpenID (Keycloak, Azure) + // - RequestHeader (GSSAPI, SSPI, SAML) + // - OAuth (GitHub, GitLab, Google) + // Logging out of the console will destroy the user's token. The logoutRedirect + // provides the user the option to perform single logout (SLO) through the identity + // provider to destroy their single sign-on session. 
+ // +optional + // +kubebuilder:validation:Pattern=`^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$` + LogoutRedirect string `json:"logoutRedirect,omitempty"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_dns.go b/vendor/github.com/openshift/api/config/v1/types_dns.go new file mode 100644 index 000000000..c223f828e --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_dns.go @@ -0,0 +1,92 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DNS holds cluster-wide information about DNS. The canonical name is `cluster` +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type DNS struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec DNSSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status DNSStatus `json:"status"` +} + +type DNSSpec struct { + // baseDomain is the base domain of the cluster. All managed DNS records will + // be sub-domains of this base. + // + // For example, given the base domain `openshift.example.com`, an API server + // DNS record may be created for `cluster-api.openshift.example.com`. + // + // Once set, this field cannot be changed. + BaseDomain string `json:"baseDomain"` + // publicZone is the location where all the DNS records that are publicly accessible to + // the internet exist. + // + // If this field is nil, no public records should be created. + // + // Once set, this field cannot be changed. + // + // +optional + PublicZone *DNSZone `json:"publicZone,omitempty"` + // privateZone is the location where all the DNS records that are only available internally + // to the cluster exist. + // + // If this field is nil, no private records should be created. + // + // Once set, this field cannot be changed. + // + // +optional + PrivateZone *DNSZone `json:"privateZone,omitempty"` +} + +// DNSZone is used to define a DNS hosted zone. +// A zone can be identified by an ID or tags. +type DNSZone struct { + // id is the identifier that can be used to find the DNS hosted zone. + // + // on AWS zone can be fetched using `ID` as id in [1] + // on Azure zone can be fetched using `ID` as a pre-determined name in [2], + // on GCP zone can be fetched using `ID` as a pre-determined name in [3]. + // + // [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options + // [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show + // [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get + // +optional + ID string `json:"id,omitempty"` + + // tags can be used to query the DNS hosted zone. 
+ // + // on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters, + // + // [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options + // +optional + Tags map[string]string `json:"tags,omitempty"` +} + +type DNSStatus struct { + // dnsSuffix (service-ca amongst others) +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type DNSList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []DNS `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go new file mode 100644 index 000000000..149cf8e6f --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_feature.go @@ -0,0 +1,225 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Feature holds cluster-wide information about feature gates. The canonical name is `cluster` +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type FeatureGate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec FeatureGateSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status FeatureGateStatus `json:"status"` +} + +type FeatureSet string + +var ( + // Default feature set that allows upgrades. + Default FeatureSet = "" + + // TechPreviewNoUpgrade turns on tech preview features that are not part of the normal supported platform. Turning + // this feature set on CANNOT BE UNDONE and PREVENTS UPGRADES. + TechPreviewNoUpgrade FeatureSet = "TechPreviewNoUpgrade" + + // CustomNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. + // Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations + // your cluster may fail in an unrecoverable way. + CustomNoUpgrade FeatureSet = "CustomNoUpgrade" + + // TopologyManager enables ToplogyManager support. Upgrades are enabled with this feature. + LatencySensitive FeatureSet = "LatencySensitive" + + // IPv6DualStackNoUpgrade enables dual-stack. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. + IPv6DualStackNoUpgrade FeatureSet = "IPv6DualStackNoUpgrade" +) + +type FeatureGateSpec struct { + FeatureGateSelection `json:",inline"` +} + +// +union +type FeatureGateSelection struct { + // featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting. + // Turning on or off features may cause irreversible changes in your cluster which cannot be undone. + // +unionDiscriminator + // +optional + FeatureSet FeatureSet `json:"featureSet,omitempty"` + + // customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. 
+ // Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations + // your cluster may fail in an unrecoverable way. featureSet must equal "CustomNoUpgrade" must be set to use this field. + // +optional + // +nullable + CustomNoUpgrade *CustomFeatureGates `json:"customNoUpgrade,omitempty"` +} + +type CustomFeatureGates struct { + // enabled is a list of all feature gates that you want to force on + // +optional + Enabled []string `json:"enabled,omitempty"` + // disabled is a list of all feature gates that you want to force off + // +optional + Disabled []string `json:"disabled,omitempty"` +} + +type FeatureGateStatus struct { +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type FeatureGateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []FeatureGate `json:"items"` +} + +type FeatureGateEnabledDisabled struct { + Enabled []string + Disabled []string +} + +// FeatureSets Contains a map of Feature names to Enabled/Disabled Feature. +// +// NOTE: The caller needs to make sure to check for the existence of the value +// using golang's existence field. A possible scenario is an upgrade where new +// FeatureSets are added and a controller has not been upgraded with a newer +// version of this file. In this upgrade scenario the map could return nil. +// +// example: +// if featureSet, ok := FeatureSets["SomeNewFeature"]; ok { } +// +// If you put an item in either of these lists, put your area and name on it so we can find owners. +var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{ + Default: defaultFeatures, + CustomNoUpgrade: { + Enabled: []string{}, + Disabled: []string{}, + }, + TechPreviewNoUpgrade: newDefaultFeatures(). + with("CSIDriverAzureDisk"). // sig-storage, jsafrane, OCP specific + with("CSIDriverAzureFile"). // sig-storage, fbertina, OCP specific + with("CSIDriverVSphere"). // sig-storage, jsafrane, OCP specific + with("CSIMigrationAWS"). // sig-storage, jsafrane, Kubernetes feature gate + with("CSIMigrationOpenStack"). // sig-storage, jsafrane, Kubernetes feature gate + with("CSIMigrationGCE"). // sig-storage, fbertina, Kubernetes feature gate + with("CSIMigrationAzureDisk"). // sig-storage, fbertina, Kubernetes feature gate + with("CSIMigrationAzureFile"). // sig-storage, fbertina, Kubernetes feature gate + with("CSIMigrationvSphere"). // sig-storage, fbertina, Kubernetes feature gate + with("ExternalCloudProvider"). // sig-cloud-provider, jspeed, OCP specific + with("InsightsOperatorPullingSCA"). // insights-operator/ccx, tremes, OCP specific + with("CSIDriverSharedResource"). // sig-build, adkaplan, OCP specific + with("BuildCSIVolumes"). // sig-build, adkaplan, OCP specific + with("NodeSwap"). // sig-node, ehashman, Kubernetes feature gate + with("MachineAPIProviderOpenStack"). // openstack, egarcia (#forum-openstack), OCP specific + toFeatures(), + LatencySensitive: newDefaultFeatures(). + with( + "TopologyManager", // sig-pod, sjenning + ). + toFeatures(), + IPv6DualStackNoUpgrade: newDefaultFeatures(). + with( + "IPv6DualStack", // sig-network, danwinship + ). 
+ toFeatures(), +} + +var defaultFeatures = &FeatureGateEnabledDisabled{ + Enabled: []string{ + "APIPriorityAndFairness", // sig-apimachinery, deads2k + "RotateKubeletServerCertificate", // sig-pod, sjenning + "SupportPodPidsLimit", // sig-pod, sjenning + "NodeDisruptionExclusion", // sig-scheduling, ccoleman + "ServiceNodeExclusion", // sig-scheduling, ccoleman + "DownwardAPIHugePages", // sig-node, rphillips + "PodSecurity", // sig-auth, s-urbaniak + }, + Disabled: []string{ + "LegacyNodeRoleBehavior", // sig-scheduling, ccoleman + }, +} + +type featureSetBuilder struct { + forceOn []string + forceOff []string +} + +func newDefaultFeatures() *featureSetBuilder { + return &featureSetBuilder{} +} + +func (f *featureSetBuilder) with(forceOn ...string) *featureSetBuilder { + f.forceOn = append(f.forceOn, forceOn...) + return f +} + +func (f *featureSetBuilder) without(forceOff ...string) *featureSetBuilder { + f.forceOff = append(f.forceOff, forceOff...) + return f +} + +func (f *featureSetBuilder) isForcedOff(needle string) bool { + for _, forcedOff := range f.forceOff { + if needle == forcedOff { + return true + } + } + return false +} + +func (f *featureSetBuilder) isForcedOn(needle string) bool { + for _, forceOn := range f.forceOn { + if needle == forceOn { + return true + } + } + return false +} + +func (f *featureSetBuilder) toFeatures() *FeatureGateEnabledDisabled { + finalOn := []string{} + finalOff := []string{} + + // only add the default enabled features if they haven't been explicitly set off + for _, defaultOn := range defaultFeatures.Enabled { + if !f.isForcedOff(defaultOn) { + finalOn = append(finalOn, defaultOn) + } + } + for _, currOn := range f.forceOn { + if f.isForcedOff(currOn) { + panic("coding error, you can't have features both on and off") + } + finalOn = append(finalOn, currOn) + } + + // only add the default disabled features if they haven't been explicitly set on + for _, defaultOff := range defaultFeatures.Disabled { + if !f.isForcedOn(defaultOff) { + finalOff = append(finalOff, defaultOff) + } + } + for _, currOff := range f.forceOff { + finalOff = append(finalOff, currOff) + } + + return &FeatureGateEnabledDisabled{ + Enabled: finalOn, + Disabled: finalOff, + } +} diff --git a/vendor/github.com/openshift/api/config/v1/types_image.go b/vendor/github.com/openshift/api/config/v1/types_image.go new file mode 100644 index 000000000..08a31072d --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_image.go @@ -0,0 +1,128 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Image governs policies related to imagestream imports and runtime configuration +// for external registries. It allows cluster admins to configure which registries +// OpenShift is allowed to import images from, extra CA trust bundles for external +// registries, and policies to block or allow registry hostnames. +// When exposing OpenShift's image registry to the public, this also lets cluster +// admins specify the external hostname. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 +type Image struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec ImageSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status ImageStatus `json:"status"` +} + +type ImageSpec struct { + // allowedRegistriesForImport limits the container image registries that normal users may import + // images from. Set this list to the registries that you trust to contain valid Docker + // images and that you want applications to be able to import from. Users with + // permission to create Images or ImageStreamMappings via the API are not affected by + // this policy - typically only administrators or system integrations will have those + // permissions. + // +optional + AllowedRegistriesForImport []RegistryLocation `json:"allowedRegistriesForImport,omitempty"` + + // externalRegistryHostnames provides the hostnames for the default external image + // registry. The external hostname should be set only when the image registry + // is exposed externally. The first value is used in 'publicDockerImageRepository' + // field in ImageStreams. The value must be in "hostname[:port]" format. + // +optional + ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"` + + // additionalTrustedCA is a reference to a ConfigMap containing additional CAs that + // should be trusted during imagestream import, pod image pull, build image pull, and + // imageregistry pullthrough. + // The namespace for this config map is openshift-config. + // +optional + AdditionalTrustedCA ConfigMapNameReference `json:"additionalTrustedCA"` + + // registrySources contains configuration that determines how the container runtime + // should treat individual registries when accessing images for builds+pods. (e.g. + // whether or not to allow insecure access). It does not contain configuration for the + // internal cluster registry. + // +optional + RegistrySources RegistrySources `json:"registrySources"` +} + +type ImageStatus struct { + + // internalRegistryHostname sets the hostname for the default internal image + // registry. The value must be in "hostname[:port]" format. + // This value is set by the image registry operator which controls the internal registry + // hostname. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY + // environment variable but this setting overrides the environment variable. + // +optional + InternalRegistryHostname string `json:"internalRegistryHostname,omitempty"` + + // externalRegistryHostnames provides the hostnames for the default external image + // registry. The external hostname should be set only when the image registry + // is exposed externally. The first value is used in 'publicDockerImageRepository' + // field in ImageStreams. The value must be in "hostname[:port]" format. + // +optional + ExternalRegistryHostnames []string `json:"externalRegistryHostnames,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 +type ImageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Image `json:"items"` +} + +// RegistryLocation contains a location of the registry specified by the registry domain +// name. The domain name might include wildcards, like '*' or '??'. +type RegistryLocation struct { + // domainName specifies a domain name for the registry + // In case the registry use non-standard (80 or 443) port, the port should be included + // in the domain name as well. + DomainName string `json:"domainName"` + // insecure indicates whether the registry is secure (https) or insecure (http) + // By default (if not specified) the registry is assumed as secure. + // +optional + Insecure bool `json:"insecure,omitempty"` +} + +// RegistrySources holds cluster-wide information about how to handle the registries config. +type RegistrySources struct { + // insecureRegistries are registries which do not have a valid TLS certificates or only support HTTP connections. + // +optional + InsecureRegistries []string `json:"insecureRegistries,omitempty"` + // blockedRegistries cannot be used for image pull and push actions. All other registries are permitted. + // + // Only one of BlockedRegistries or AllowedRegistries may be set. + // +optional + BlockedRegistries []string `json:"blockedRegistries,omitempty"` + // allowedRegistries are the only registries permitted for image pull and push actions. All other registries are denied. + // + // Only one of BlockedRegistries or AllowedRegistries may be set. + // +optional + AllowedRegistries []string `json:"allowedRegistries,omitempty"` + // containerRuntimeSearchRegistries are registries that will be searched when pulling images that do not have fully qualified + // domains in their pull specs. Registries will be searched in the order provided in the list. + // Note: this search list only works with the container runtime, i.e CRI-O. Will NOT work with builds or imagestream imports. + // +optional + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:Format=hostname + // +listType=set + ContainerRuntimeSearchRegistries []string `json:"containerRuntimeSearchRegistries,omitempty"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go new file mode 100644 index 000000000..8ccad9c53 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go @@ -0,0 +1,89 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageContentPolicy holds cluster-wide information about how to handle registry mirror rules. +// When multiple policies are defined, the outcome of the behavior is defined on each field. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageContentPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec ImageContentPolicySpec `json:"spec"` +} + +// ImageContentPolicySpec is the specification of the ImageContentPolicy CRD. 
+type ImageContentPolicySpec struct { + // repositoryDigestMirrors allows images referenced by image digests in pods to be + // pulled from alternative mirrored repository locations. The image pull specification + // provided to the pod will be compared to the source locations described in RepositoryDigestMirrors + // and the image may be pulled down from any of the mirrors in the list instead of the + // specified repository allowing administrators to choose a potentially faster mirror. + // To pull image from mirrors by tags, should set the "allowMirrorByTags". + // + // Each “source” repository is treated independently; configurations for different “source” + // repositories don’t interact. + // + // If the "mirrors" is not specified, the image will continue to be pulled from the specified + // repository in the pull spec. + // + // When multiple policies are defined for the same “source” repository, the sets of defined + // mirrors will be merged together, preserving the relative order of the mirrors, if possible. + // For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the + // mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict + // (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified. + // +optional + // +listType=map + // +listMapKey=source + RepositoryDigestMirrors []RepositoryDigestMirrors `json:"repositoryDigestMirrors"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageContentPolicyList lists the items in the ImageContentPolicy CRD. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageContentPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []ImageContentPolicy `json:"items"` +} + +// RepositoryDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config. +type RepositoryDigestMirrors struct { + // source is the repository that users refer to, e.g. in image pull specifications. + // +required + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$` + Source string `json:"source"` + // allowMirrorByTags if true, the mirrors can be used to pull the images that are referenced by their tags. Default is false, the mirrors only work when pulling the images that are referenced by their digests. + // Pulling images by tag can potentially yield different images, depending on which endpoint + // we pull from. Forcing digest-pulls for mirrors avoids that issue. + // +optional + AllowMirrorByTags bool `json:"allowMirrorByTags,omitempty"` + // mirrors is zero or more repositories that may also contain the same images. + // If the "mirrors" is not specified, the image will continue to be pulled from the specified + // repository in the pull spec. No mirror will be configured. + // The order of mirrors in this list is treated as the user's desired priority, while source + // is by default considered lower priority than all mirrors. 
Other cluster configuration, + // including (but not limited to) other repositoryDigestMirrors objects, + // may impact the exact order mirrors are contacted in, or some mirrors may be contacted + // in parallel, so this should be considered a preference rather than a guarantee of ordering. + // +optional + // +listType=set + Mirrors []Mirror `json:"mirrors,omitempty"` +} + +// +kubebuilder:validation:Pattern=`^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$` +type Mirror string diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go new file mode 100644 index 000000000..fe42bec83 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -0,0 +1,706 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:subresource:status + +// Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster` +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Infrastructure struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec InfrastructureSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status InfrastructureStatus `json:"status"` +} + +// InfrastructureSpec contains settings that apply to the cluster infrastructure. +type InfrastructureSpec struct { + // cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file. + // This configuration file is used to configure the Kubernetes cloud provider integration + // when using the built-in cloud provider integration or the external cloud controller manager. + // The namespace for this config map is openshift-config. + // + // cloudConfig should only be consumed by the kube_cloud_config controller. + // The controller is responsible for using the user configuration in the spec + // for various platforms and combining that with the user provided ConfigMap in this field + // to create a stitched kube cloud config. + // The controller generates a ConfigMap `kube-cloud-config` in `openshift-config-managed` namespace + // with the kube cloud config is stored in `cloud.conf` key. + // All the clients are expected to use the generated ConfigMap only. + // + // +optional + CloudConfig ConfigMapFileReference `json:"cloudConfig"` + + // platformSpec holds desired information specific to the underlying + // infrastructure provider. + PlatformSpec PlatformSpec `json:"platformSpec,omitempty"` +} + +// InfrastructureStatus describes the infrastructure the cluster is leveraging. +type InfrastructureStatus struct { + // infrastructureName uniquely identifies a cluster with a human friendly name. + // Once set it should not be changed. Must be of max length 27 and must have only + // alphanumeric or hyphen characters. + InfrastructureName string `json:"infrastructureName"` + + // platform is the underlying infrastructure provider for the cluster. 
+ // + // Deprecated: Use platformStatus.type instead. + Platform PlatformType `json:"platform,omitempty"` + + // platformStatus holds status information specific to the underlying + // infrastructure provider. + // +optional + PlatformStatus *PlatformStatus `json:"platformStatus,omitempty"` + + // etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering + // etcd servers and clients. + // For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery + // deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release. + EtcdDiscoveryDomain string `json:"etcdDiscoveryDomain"` + + // apiServerURL is a valid URI with scheme 'https', address and + // optionally a port (defaulting to 443). apiServerURL can be used by components like the web console + // to tell users where to find the Kubernetes API. + APIServerURL string `json:"apiServerURL"` + + // apiServerInternalURL is a valid URI with scheme 'https', + // address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components + // like kubelets, to contact the Kubernetes API server using the + // infrastructure provider rather than Kubernetes networking. + APIServerInternalURL string `json:"apiServerInternalURI"` + + // controlPlaneTopology expresses the expectations for operands that normally run on control nodes. + // The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster. + // The 'SingleReplica' mode will be used in single-node deployments + // and the operators should not configure the operand for highly-available operation + // The 'External' mode indicates that the control plane is hosted externally to the cluster and that + // its components are not visible within the cluster. + // +kubebuilder:default=HighlyAvailable + // +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica;External + ControlPlaneTopology TopologyMode `json:"controlPlaneTopology"` + + // infrastructureTopology expresses the expectations for infrastructure services that do not run on control + // plane nodes, usually indicated by a node selector for a `role` value + // other than `master`. + // The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster. + // The 'SingleReplica' mode will be used in single-node deployments + // and the operators should not configure the operand for highly-available operation + // NOTE: External topology mode is not applicable for this field. + // +kubebuilder:default=HighlyAvailable + // +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica + InfrastructureTopology TopologyMode `json:"infrastructureTopology"` +} + +// TopologyMode defines the topology mode of the control/infra nodes. +// NOTE: Enum validation is specified in each field that uses this type, +// given that External value is not applicable to the InfrastructureTopology +// field. +type TopologyMode string + +const ( + // "HighlyAvailable" is for operators to configure high-availability as much as possible. + HighlyAvailableTopologyMode TopologyMode = "HighlyAvailable" + + // "SingleReplica" is for operators to avoid spending resources for high-availability purpose. + SingleReplicaTopologyMode TopologyMode = "SingleReplica" + + // "External" indicates that the component is running externally to the cluster. 
When specified + // as the control plane topology, operators should avoid scheduling workloads to masters or assume + // that any of the control plane components such as kubernetes API server or etcd are visible within + // the cluster. + ExternalTopologyMode TopologyMode = "External" +) + +// PlatformType is a specific supported infrastructure provider. +// +kubebuilder:validation:Enum="";AWS;Azure;BareMetal;GCP;Libvirt;OpenStack;None;VSphere;oVirt;IBMCloud;KubeVirt;EquinixMetal;PowerVS;AlibabaCloud +type PlatformType string + +const ( + // AWSPlatformType represents Amazon Web Services infrastructure. + AWSPlatformType PlatformType = "AWS" + + // AzurePlatformType represents Microsoft Azure infrastructure. + AzurePlatformType PlatformType = "Azure" + + // BareMetalPlatformType represents managed bare metal infrastructure. + BareMetalPlatformType PlatformType = "BareMetal" + + // GCPPlatformType represents Google Cloud Platform infrastructure. + GCPPlatformType PlatformType = "GCP" + + // LibvirtPlatformType represents libvirt infrastructure. + LibvirtPlatformType PlatformType = "Libvirt" + + // OpenStackPlatformType represents OpenStack infrastructure. + OpenStackPlatformType PlatformType = "OpenStack" + + // NonePlatformType means there is no infrastructure provider. + NonePlatformType PlatformType = "None" + + // VSpherePlatformType represents VMWare vSphere infrastructure. + VSpherePlatformType PlatformType = "VSphere" + + // OvirtPlatformType represents oVirt/RHV infrastructure. + OvirtPlatformType PlatformType = "oVirt" + + // IBMCloudPlatformType represents IBM Cloud infrastructure. + IBMCloudPlatformType PlatformType = "IBMCloud" + + // KubevirtPlatformType represents KubeVirt/Openshift Virtualization infrastructure. + KubevirtPlatformType PlatformType = "KubeVirt" + + // EquinixMetalPlatformType represents Equinix Metal infrastructure. + EquinixMetalPlatformType PlatformType = "EquinixMetal" + + // PowerVSPlatformType represents IBM Power Systems Virtual Servers infrastructure. + PowerVSPlatformType PlatformType = "PowerVS" + + // AlibabaCloudPlatformType represents Alibaba Cloud infrastructure. + AlibabaCloudPlatformType PlatformType = "AlibabaCloud" +) + +// IBMCloudProviderType is a specific supported IBM Cloud provider cluster type +type IBMCloudProviderType string + +const ( + // Classic means that the IBM Cloud cluster is using classic infrastructure + IBMCloudProviderTypeClassic IBMCloudProviderType = "Classic" + + // VPC means that the IBM Cloud cluster is using VPC infrastructure + IBMCloudProviderTypeVPC IBMCloudProviderType = "VPC" + + // IBMCloudProviderTypeUPI means that the IBM Cloud cluster is using user provided infrastructure. + // This is utilized in IBM Cloud Satellite environments. + IBMCloudProviderTypeUPI IBMCloudProviderType = "UPI" +) + +// PlatformSpec holds the desired state specific to the underlying infrastructure provider +// of the current cluster. Since these are used at spec-level for the underlying cluster, it +// is supposed that only one of the spec structs is set. +type PlatformSpec struct { + // type is the underlying infrastructure provider for the cluster. This + // value controls whether infrastructure automation such as service load + // balancers, dynamic volume provisioning, machine creation and deletion, and + // other integrations are enabled. If None, no infrastructure automation is + // enabled. 
Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", + // "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", + // "AlibabaCloud" and "None". Individual components may not support all platforms, + // and must handle unrecognized platforms as None if they do not support that platform. + // + // +unionDiscriminator + Type PlatformType `json:"type"` + + // AWS contains settings specific to the Amazon Web Services infrastructure provider. + // +optional + AWS *AWSPlatformSpec `json:"aws,omitempty"` + + // Azure contains settings specific to the Azure infrastructure provider. + // +optional + Azure *AzurePlatformSpec `json:"azure,omitempty"` + + // GCP contains settings specific to the Google Cloud Platform infrastructure provider. + // +optional + GCP *GCPPlatformSpec `json:"gcp,omitempty"` + + // BareMetal contains settings specific to the BareMetal platform. + // +optional + BareMetal *BareMetalPlatformSpec `json:"baremetal,omitempty"` + + // OpenStack contains settings specific to the OpenStack infrastructure provider. + // +optional + OpenStack *OpenStackPlatformSpec `json:"openstack,omitempty"` + + // Ovirt contains settings specific to the oVirt infrastructure provider. + // +optional + Ovirt *OvirtPlatformSpec `json:"ovirt,omitempty"` + + // VSphere contains settings specific to the VSphere infrastructure provider. + // +optional + VSphere *VSpherePlatformSpec `json:"vsphere,omitempty"` + + // IBMCloud contains settings specific to the IBMCloud infrastructure provider. + // +optional + IBMCloud *IBMCloudPlatformSpec `json:"ibmcloud,omitempty"` + + // Kubevirt contains settings specific to the kubevirt infrastructure provider. + // +optional + Kubevirt *KubevirtPlatformSpec `json:"kubevirt,omitempty"` + + // EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. + // +optional + EquinixMetal *EquinixMetalPlatformSpec `json:"equinixMetal,omitempty"` + + // PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. + // +optional + PowerVS *PowerVSPlatformSpec `json:"powervs,omitempty"` + + // AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. + // +optional + AlibabaCloud *AlibabaCloudPlatformSpec `json:"alibabaCloud,omitempty"` +} + +// PlatformStatus holds the current status specific to the underlying infrastructure provider +// of the current cluster. Since these are used at status-level for the underlying cluster, it +// is supposed that only one of the status structs is set. +type PlatformStatus struct { + // type is the underlying infrastructure provider for the cluster. This + // value controls whether infrastructure automation such as service load + // balancers, dynamic volume provisioning, machine creation and deletion, and + // other integrations are enabled. If None, no infrastructure automation is + // enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", + // "OpenStack", "VSphere", "oVirt", "EquinixMetal", "PowerVS", "AlibabaCloud" and "None". + // Individual components may not support all platforms, and must handle + // unrecognized platforms as None if they do not support that platform. + // + // This value will be synced with to the `status.platform` and `status.platformStatus.type`. + // Currently this value cannot be changed once set. + Type PlatformType `json:"type"` + + // AWS contains settings specific to the Amazon Web Services infrastructure provider. 
+ // +optional + AWS *AWSPlatformStatus `json:"aws,omitempty"` + + // Azure contains settings specific to the Azure infrastructure provider. + // +optional + Azure *AzurePlatformStatus `json:"azure,omitempty"` + + // GCP contains settings specific to the Google Cloud Platform infrastructure provider. + // +optional + GCP *GCPPlatformStatus `json:"gcp,omitempty"` + + // BareMetal contains settings specific to the BareMetal platform. + // +optional + BareMetal *BareMetalPlatformStatus `json:"baremetal,omitempty"` + + // OpenStack contains settings specific to the OpenStack infrastructure provider. + // +optional + OpenStack *OpenStackPlatformStatus `json:"openstack,omitempty"` + + // Ovirt contains settings specific to the oVirt infrastructure provider. + // +optional + Ovirt *OvirtPlatformStatus `json:"ovirt,omitempty"` + + // VSphere contains settings specific to the VSphere infrastructure provider. + // +optional + VSphere *VSpherePlatformStatus `json:"vsphere,omitempty"` + + // IBMCloud contains settings specific to the IBMCloud infrastructure provider. + // +optional + IBMCloud *IBMCloudPlatformStatus `json:"ibmcloud,omitempty"` + + // Kubevirt contains settings specific to the kubevirt infrastructure provider. + // +optional + Kubevirt *KubevirtPlatformStatus `json:"kubevirt,omitempty"` + + // EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. + // +optional + EquinixMetal *EquinixMetalPlatformStatus `json:"equinixMetal,omitempty"` + + // PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider. + // +optional + PowerVS *PowerVSPlatformStatus `json:"powervs,omitempty"` + + // AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. + // +optional + AlibabaCloud *AlibabaCloudPlatformStatus `json:"alibabaCloud,omitempty"` +} + +// AWSServiceEndpoint store the configuration of a custom url to +// override existing defaults of AWS Services. +type AWSServiceEndpoint struct { + // name is the name of the AWS service. + // The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html + // This must be provided and cannot be empty. + // + // +kubebuilder:validation:Pattern=`^[a-z0-9-]+$` + Name string `json:"name"` + + // url is fully qualified URI with scheme https, that overrides the default generated + // endpoint for a client. + // This must be provided and cannot be empty. + // + // +kubebuilder:validation:Pattern=`^https://` + URL string `json:"url"` +} + +// AWSPlatformSpec holds the desired state of the Amazon Web Services infrastructure provider. +// This only includes fields that can be modified in the cluster. +type AWSPlatformSpec struct { + // serviceEndpoints list contains custom endpoints which will override default + // service endpoint of AWS Services. + // There must be only one ServiceEndpoint for a service. + // +optional + ServiceEndpoints []AWSServiceEndpoint `json:"serviceEndpoints,omitempty"` +} + +// AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider. +type AWSPlatformStatus struct { + // region holds the default AWS region for new AWS resources created by the cluster. + Region string `json:"region"` + + // ServiceEndpoints list contains custom endpoints which will override default + // service endpoint of AWS Services. + // There must be only one ServiceEndpoint for a service. 
+ // +optional + ServiceEndpoints []AWSServiceEndpoint `json:"serviceEndpoints,omitempty"` + + // resourceTags is a list of additional tags to apply to AWS resources created for the cluster. + // See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. + // AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags + // available for the user. + // +kubebuilder:validation:MaxItems=25 + // +optional + ResourceTags []AWSResourceTag `json:"resourceTags,omitempty"` +} + +// AWSResourceTag is a tag to apply to AWS resources created for the cluster. +type AWSResourceTag struct { + // key is the key of the tag + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$` + // +required + Key string `json:"key"` + // value is the value of the tag. + // Some AWS service do not support empty values. Since tags are added to resources in many services, the + // length of the tag value must meet the requirements of all services. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.:/=+-@]+$` + // +required + Value string `json:"value"` +} + +// AzurePlatformSpec holds the desired state of the Azure infrastructure provider. +// This only includes fields that can be modified in the cluster. +type AzurePlatformSpec struct{} + +// AzurePlatformStatus holds the current status of the Azure infrastructure provider. +type AzurePlatformStatus struct { + // resourceGroupName is the Resource Group for new Azure resources created for the cluster. + ResourceGroupName string `json:"resourceGroupName"` + + // networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. + // If empty, the value is same as ResourceGroupName. + // +optional + NetworkResourceGroupName string `json:"networkResourceGroupName,omitempty"` + + // cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK + // with the appropriate Azure API endpoints. + // If empty, the value is equal to `AzurePublicCloud`. + // +optional + CloudName AzureCloudEnvironment `json:"cloudName,omitempty"` + + // armEndpoint specifies a URL to use for resource management in non-soverign clouds such as Azure Stack. + // +optional + ARMEndpoint string `json:"armEndpoint,omitempty"` +} + +// AzureCloudEnvironment is the name of the Azure cloud environment +// +kubebuilder:validation:Enum="";AzurePublicCloud;AzureUSGovernmentCloud;AzureChinaCloud;AzureGermanCloud;AzureStackCloud +type AzureCloudEnvironment string + +const ( + // AzurePublicCloud is the general-purpose, public Azure cloud environment. + AzurePublicCloud AzureCloudEnvironment = "AzurePublicCloud" + + // AzureUSGovernmentCloud is the Azure cloud environment for the US government. + AzureUSGovernmentCloud AzureCloudEnvironment = "AzureUSGovernmentCloud" + + // AzureChinaCloud is the Azure cloud environment used in China. + AzureChinaCloud AzureCloudEnvironment = "AzureChinaCloud" + + // AzureGermanCloud is the Azure cloud environment used in Germany. + AzureGermanCloud AzureCloudEnvironment = "AzureGermanCloud" + + // AzureStackCloud is the Azure cloud environment used at the edge and on premises. 
+ AzureStackCloud AzureCloudEnvironment = "AzureStackCloud" +) + +// GCPPlatformSpec holds the desired state of the Google Cloud Platform infrastructure provider. +// This only includes fields that can be modified in the cluster. +type GCPPlatformSpec struct{} + +// GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider. +type GCPPlatformStatus struct { + // resourceGroupName is the Project ID for new GCP resources created for the cluster. + ProjectID string `json:"projectID"` + + // region holds the region for new GCP resources created for the cluster. + Region string `json:"region"` +} + +// BareMetalPlatformSpec holds the desired state of the BareMetal infrastructure provider. +// This only includes fields that can be modified in the cluster. +type BareMetalPlatformSpec struct{} + +// BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider. +// For more information about the network architecture used with the BareMetal platform type, see: +// https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md +type BareMetalPlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + IngressIP string `json:"ingressIP,omitempty"` + + // nodeDNSIP is the IP address for the internal DNS used by the + // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` + // provides name resolution for the nodes themselves. There is no DNS-as-a-service for + // BareMetal deployments. In order to minimize necessary changes to the + // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames + // to the nodes in the cluster. + NodeDNSIP string `json:"nodeDNSIP,omitempty"` +} + +// OpenStackPlatformSpec holds the desired state of the OpenStack infrastructure provider. +// This only includes fields that can be modified in the cluster. +type OpenStackPlatformSpec struct{} + +// OpenStackPlatformStatus holds the current status of the OpenStack infrastructure provider. +type OpenStackPlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // cloudName is the name of the desired OpenStack cloud in the + // client configuration file (`clouds.yaml`). + CloudName string `json:"cloudName,omitempty"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. 
+ IngressIP string `json:"ingressIP,omitempty"` + + // nodeDNSIP is the IP address for the internal DNS used by the + // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` + // provides name resolution for the nodes themselves. There is no DNS-as-a-service for + // OpenStack deployments. In order to minimize necessary changes to the + // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames + // to the nodes in the cluster. + NodeDNSIP string `json:"nodeDNSIP,omitempty"` +} + +// OvirtPlatformSpec holds the desired state of the oVirt infrastructure provider. +// This only includes fields that can be modified in the cluster. +type OvirtPlatformSpec struct{} + +// OvirtPlatformStatus holds the current status of the oVirt infrastructure provider. +type OvirtPlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + IngressIP string `json:"ingressIP,omitempty"` + + // deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release. + NodeDNSIP string `json:"nodeDNSIP,omitempty"` +} + +// VSpherePlatformSpec holds the desired state of the vSphere infrastructure provider. +// This only includes fields that can be modified in the cluster. +type VSpherePlatformSpec struct{} + +// VSpherePlatformStatus holds the current status of the vSphere infrastructure provider. +type VSpherePlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + IngressIP string `json:"ingressIP,omitempty"` + + // nodeDNSIP is the IP address for the internal DNS used by the + // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` + // provides name resolution for the nodes themselves. There is no DNS-as-a-service for + // vSphere deployments. In order to minimize necessary changes to the + // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames + // to the nodes in the cluster. + NodeDNSIP string `json:"nodeDNSIP,omitempty"` +} + +// IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider. +// This only includes fields that can be modified in the cluster. +type IBMCloudPlatformSpec struct{} + +//IBMCloudPlatformStatus holds the current status of the IBMCloud infrastructure provider. 
+type IBMCloudPlatformStatus struct { + // Location is where the cluster has been deployed + Location string `json:"location,omitempty"` + + // ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster. + ResourceGroupName string `json:"resourceGroupName,omitempty"` + + // ProviderType indicates the type of cluster that was created + ProviderType IBMCloudProviderType `json:"providerType,omitempty"` + + // CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + // the DNS zone for the cluster's base domain + CISInstanceCRN string `json:"cisInstanceCRN,omitempty"` +} + +// KubevirtPlatformSpec holds the desired state of the kubevirt infrastructure provider. +// This only includes fields that can be modified in the cluster. +type KubevirtPlatformSpec struct{} + +// KubevirtPlatformStatus holds the current status of the kubevirt infrastructure provider. +type KubevirtPlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + IngressIP string `json:"ingressIP,omitempty"` +} + +// EquinixMetalPlatformSpec holds the desired state of the Equinix Metal infrastructure provider. +// This only includes fields that can be modified in the cluster. +type EquinixMetalPlatformSpec struct{} + +// EquinixMetalPlatformStatus holds the current status of the Equinix Metal infrastructure provider. +type EquinixMetalPlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + IngressIP string `json:"ingressIP,omitempty"` +} + +// PowervsServiceEndpoint stores the configuration of a custom url to +// override existing defaults of PowerVS Services. +type PowerVSServiceEndpoint struct { + // name is the name of the Power VS service. + // Few of the services are + // IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api + // ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller + // Power Cloud - https://cloud.ibm.com/apidocs/power-cloud + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^[a-z0-9-]+$` + Name string `json:"name"` + + // url is fully qualified URI with scheme https, that overrides the default generated + // endpoint for a client. + // This must be provided and cannot be empty. 
+ // + // +kubebuilder:validation:Required + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Format=uri + // +kubebuilder:validation:Pattern=`^https://` + URL string `json:"url"` +} + +// PowerVSPlatformSpec holds the desired state of the IBM Power Systems Virtual Servers infrastructure provider. +// This only includes fields that can be modified in the cluster. +type PowerVSPlatformSpec struct { + // serviceEndpoints is a list of custom endpoints which will override the default + // service endpoints of a Power VS service. + // +listType=map + // +listMapKey=name + // +optional + ServiceEndpoints []PowerVSServiceEndpoint `json:"serviceEndpoints,omitempty"` +} + +// PowerVSPlatformStatus holds the current status of the IBM Power Systems Virtual Servers infrastrucutre provider. +type PowerVSPlatformStatus struct { + // region holds the default Power VS region for new Power VS resources created by the cluster. + Region string `json:"region"` + + // zone holds the default zone for the new Power VS resources created by the cluster. + // Note: Currently only single-zone OCP clusters are supported + Zone string `json:"zone"` + + // serviceEndpoints is a list of custom endpoints which will override the default + // service endpoints of a Power VS service. + // +optional + ServiceEndpoints []PowerVSServiceEndpoint `json:"serviceEndpoints,omitempty"` + + // CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + // the DNS zone for the cluster's base domain + CISInstanceCRN string `json:"cisInstanceCRN,omitempty"` +} + +// AlibabaCloudPlatformSpec holds the desired state of the Alibaba Cloud infrastructure provider. +// This only includes fields that can be modified in the cluster. +type AlibabaCloudPlatformSpec struct{} + +// AlibabaCloudPlatformStatus holds the current status of the Alibaba Cloud infrastructure provider. +type AlibabaCloudPlatformStatus struct { + // region specifies the region for Alibaba Cloud resources created for the cluster. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^[0-9A-Za-z-]+$` + // +required + Region string `json:"region"` + // resourceGroupID is the ID of the resource group for the cluster. + // +kubebuilder:validation:Pattern=`^(rg-[0-9A-Za-z]+)?$` + // +optional + ResourceGroupID string `json:"resourceGroupID,omitempty"` + // resourceTags is a list of additional tags to apply to Alibaba Cloud resources created for the cluster. + // +kubebuilder:validation:MaxItems=20 + // +listType=map + // +listMapKey=key + // +optional + ResourceTags []AlibabaCloudResourceTag `json:"resourceTags,omitempty"` +} + +// AlibabaCloudResourceTag is the set of tags to add to apply to resources. +type AlibabaCloudResourceTag struct { + // key is the key of the tag. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + // +required + Key string `json:"key"` + // value is the value of the tag. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + // +required + Value string `json:"value"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// InfrastructureList is +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 +type InfrastructureList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Infrastructure `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go b/vendor/github.com/openshift/api/config/v1/types_ingress.go new file mode 100644 index 000000000..2c6bed3cb --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_ingress.go @@ -0,0 +1,211 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Ingress holds cluster-wide information about ingress, including the default ingress domain +// used for routes. The canonical name is `cluster`. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Ingress struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec IngressSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status IngressStatus `json:"status"` +} + +type IngressSpec struct { + // domain is used to generate a default host name for a route when the + // route's host name is empty. The generated host name will follow this + // pattern: "..". + // + // It is also used as the default wildcard domain suffix for ingress. The + // default ingresscontroller domain will follow this pattern: "*.". + // + // Once set, changing domain is not currently supported. + Domain string `json:"domain"` + + // appsDomain is an optional domain to use instead of the one specified + // in the domain field when a Route is created without specifying an explicit + // host. If appsDomain is nonempty, this value is used to generate default + // host values for Route. Unlike domain, appsDomain may be modified after + // installation. + // This assumes a new ingresscontroller has been setup with a wildcard + // certificate. + // +optional + AppsDomain string `json:"appsDomain,omitempty"` + + // componentRoutes is an optional list of routes that are managed by OpenShift components + // that a cluster-admin is able to configure the hostname and serving certificate for. + // The namespace and name of each route in this list should match an existing entry in the + // status.componentRoutes list. + // + // To determine the set of configurable Routes, look at namespace and name of entries in the + // .status.componentRoutes list, where participating operators write the status of + // configurable routes. + // +optional + ComponentRoutes []ComponentRouteSpec `json:"componentRoutes,omitempty"` + + // requiredHSTSPolicies specifies HSTS policies that are required to be set on newly created or updated routes + // matching the domainPattern/s and namespaceSelector/s that are specified in the policy. + // Each requiredHSTSPolicy must have at least a domainPattern and a maxAge to validate a route HSTS Policy route + // annotation, and affect route admission. + // + // A candidate route is checked for HSTS Policies if it has the HSTS Policy route annotation: + // "haproxy.router.openshift.io/hsts_header" + // E.g. 
haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains + // + // - For each candidate route, if it matches a requiredHSTSPolicy domainPattern and optional namespaceSelector, + // then the maxAge, preloadPolicy, and includeSubdomainsPolicy must be valid to be admitted. Otherwise, the route + // is rejected. + // - The first match, by domainPattern and optional namespaceSelector, in the ordering of the RequiredHSTSPolicies + // determines the route's admission status. + // - If the candidate route doesn't match any requiredHSTSPolicy domainPattern and optional namespaceSelector, + // then it may use any HSTS Policy annotation. + // + // The HSTS policy configuration may be changed after routes have already been created. An update to a previously + // admitted route may then fail if the updated route does not conform to the updated HSTS policy configuration. + // However, changing the HSTS policy configuration will not cause a route that is already admitted to stop working. + // + // Note that if there are no RequiredHSTSPolicies, any HSTS Policy annotation on the route is valid. + // +optional + RequiredHSTSPolicies []RequiredHSTSPolicy `json:"requiredHSTSPolicies,omitempty"` +} + +// ConsumingUser is an alias for string which we add validation to. Currently only service accounts are supported. +// +kubebuilder:validation:Pattern="^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$" +// +kubebuilder:validation:MinLength=1 +// +kubebuilder:validation:MaxLength=512 +type ConsumingUser string + +// Hostname is an alias for hostname string validation. +// +kubebuilder:validation:Format=hostname +type Hostname string + +type IngressStatus struct { + // componentRoutes is where participating operators place the current route status for routes whose + // hostnames and serving certificates can be customized by the cluster-admin. + // +optional + ComponentRoutes []ComponentRouteStatus `json:"componentRoutes,omitempty"` +} + +// ComponentRouteSpec allows for configuration of a route's hostname and serving certificate. +type ComponentRouteSpec struct { + // namespace is the namespace of the route to customize. + // + // The namespace and name of this componentRoute must match a corresponding + // entry in the list of status.componentRoutes if the route is to be customized. + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:Required + // +required + Namespace string `json:"namespace"` + + // name is the logical name of the route to customize. + // + // The namespace and name of this componentRoute must match a corresponding + // entry in the list of status.componentRoutes if the route is to be customized. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:Required + // +required + Name string `json:"name"` + + // hostname is the hostname that should be used by the route. + // +kubebuilder:validation:Required + // +required + Hostname Hostname `json:"hostname"` + + // servingCertKeyPairSecret is a reference to a secret of type `kubernetes.io/tls` in the openshift-config namespace. + // The serving cert/key pair must match and will be used by the operator to fulfill the intent of serving with this name. 
+ // If the custom hostname uses the default routing suffix of the cluster, + // the Secret specification for a serving certificate will not be needed. + // +optional + ServingCertKeyPairSecret SecretNameReference `json:"servingCertKeyPairSecret"` +} + +// ComponentRouteStatus contains information allowing configuration of a route's hostname and serving certificate. +type ComponentRouteStatus struct { + // namespace is the namespace of the route to customize. It must be a real namespace. Using an actual namespace + // ensures that no two components will conflict and the same component can be installed multiple times. + // + // The namespace and name of this componentRoute must match a corresponding + // entry in the list of spec.componentRoutes if the route is to be customized. + // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:Required + // +required + Namespace string `json:"namespace"` + + // name is the logical name of the route to customize. It does not have to be the actual name of a route resource + // but it cannot be renamed. + // + // The namespace and name of this componentRoute must match a corresponding + // entry in the list of spec.componentRoutes if the route is to be customized. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:Required + // +required + Name string `json:"name"` + + // defaultHostname is the hostname of this route prior to customization. + // +kubebuilder:validation:Required + // +required + DefaultHostname Hostname `json:"defaultHostname"` + + // consumingUsers is a slice of ServiceAccounts that need to have read permission on the servingCertKeyPairSecret secret. + // +kubebuilder:validation:MaxItems=5 + // +optional + ConsumingUsers []ConsumingUser `json:"consumingUsers,omitempty"` + + // currentHostnames is the list of current names used by the route. Typically, this list should consist of a single + // hostname, but if multiple hostnames are supported by the route the operator may write multiple entries to this list. + // +kubebuilder:validation:MinItems=1 + // +optional + CurrentHostnames []Hostname `json:"currentHostnames,omitempty"` + + // conditions are used to communicate the state of the componentRoutes entry. + // + // Supported conditions include Available, Degraded and Progressing. + // + // If available is true, the content served by the route can be accessed by users. This includes cases + // where a default may continue to serve content while the customized route specified by the cluster-admin + // is being configured. + // + // If Degraded is true, that means something has gone wrong trying to handle the componentRoutes entry. + // The currentHostnames field may or may not be in effect. + // + // If Progressing is true, that means the component is taking some action related to the componentRoutes entry. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied. + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:Required + // +required + RelatedObjects []ObjectReference `json:"relatedObjects"` +} + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +openshift:compatibility-gen:level=1 +type IngressList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Ingress `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go new file mode 100644 index 000000000..59392a96d --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_network.go @@ -0,0 +1,177 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. +// Please view network.spec for an explanation on what applies when configuring this resource. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Network struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration. + // As a general rule, this SHOULD NOT be read directly. Instead, you should + // consume the NetworkStatus, as it indicates the currently deployed configuration. + // Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. + // +kubebuilder:validation:Required + // +required + Spec NetworkSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status NetworkStatus `json:"status"` +} + +// NetworkSpec is the desired network configuration. +// As a general rule, this SHOULD NOT be read directly. Instead, you should +// consume the NetworkStatus, as it indicates the currently deployed configuration. +// Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each. +type NetworkSpec struct { + // IP address pool to use for pod IPs. + // This field is immutable after installation. + ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"` + + // IP address pool for services. + // Currently, we only support a single entry here. + // This field is immutable after installation. + ServiceNetwork []string `json:"serviceNetwork"` + + // NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). + // This should match a value that the cluster-network-operator understands, + // or else no networking will be installed. + // Currently supported values are: + // - OpenShiftSDN + // This field is immutable after installation. + NetworkType string `json:"networkType"` + + // externalIP defines configuration for controllers that + // affect Service.ExternalIP. If nil, then ExternalIP is + // not allowed to be set. + // +optional + ExternalIP *ExternalIPConfig `json:"externalIP,omitempty"` + + // The port range allowed for Services of type NodePort. + // If not specified, the default of 30000-32767 will be used. + // Such Services without a NodePort specified will have one + // automatically allocated from this range. + // This parameter can be updated after the cluster is + // installed. 
+ // +kubebuilder:validation:Pattern=`^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$` + ServiceNodePortRange string `json:"serviceNodePortRange,omitempty"` +} + +// NetworkStatus is the current network configuration. +type NetworkStatus struct { + // IP address pool to use for pod IPs. + ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork,omitempty"` + + // IP address pool for services. + // Currently, we only support a single entry here. + ServiceNetwork []string `json:"serviceNetwork,omitempty"` + + // NetworkType is the plugin that is deployed (e.g. OpenShiftSDN). + NetworkType string `json:"networkType,omitempty"` + + // ClusterNetworkMTU is the MTU for inter-pod networking. + ClusterNetworkMTU int `json:"clusterNetworkMTU,omitempty"` + + // Migration contains the cluster network migration configuration. + Migration *NetworkMigration `json:"migration,omitempty"` +} + +// ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs +// are allocated. +type ClusterNetworkEntry struct { + // The complete block for pod IPs. + CIDR string `json:"cidr"` + + // The size (prefix) of block to allocate to each node. If this + // field is not used by the plugin, it can be left unset. + // +kubebuilder:validation:Minimum=0 + // +optional + HostPrefix uint32 `json:"hostPrefix,omitempty"` +} + +// ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field +// of a Service resource. +type ExternalIPConfig struct { + // policy is a set of restrictions applied to the ExternalIP field. + // If nil or empty, then ExternalIP is not allowed to be set. + // +optional + Policy *ExternalIPPolicy `json:"policy,omitempty"` + + // autoAssignCIDRs is a list of CIDRs from which to automatically assign + // Service.ExternalIP. These are assigned when the service is of type + // LoadBalancer. In general, this is only useful for bare-metal clusters. + // In Openshift 3.x, this was misleadingly called "IngressIPs". + // Automatically assigned External IPs are not affected by any + // ExternalIPPolicy rules. + // Currently, only one entry may be provided. + // +optional + AutoAssignCIDRs []string `json:"autoAssignCIDRs,omitempty"` +} + +// ExternalIPPolicy configures exactly which IPs are allowed for the ExternalIP +// field in a Service. If the zero struct is supplied, then none are permitted. +// The policy controller always allows automatically assigned external IPs. +type ExternalIPPolicy struct { + // allowedCIDRs is the list of allowed CIDRs. + AllowedCIDRs []string `json:"allowedCIDRs,omitempty"` + + // rejectedCIDRs is the list of disallowed CIDRs. These take precedence + // over allowedCIDRs. + // +optional + RejectedCIDRs []string `json:"rejectedCIDRs,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type NetworkList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Network `json:"items"` +} + +// NetworkMigration represents the cluster network configuration. +type NetworkMigration struct { + // NetworkType is the target plugin that is to be deployed. 
+ // Currently supported values are: OpenShiftSDN, OVNKubernetes + // +kubebuilder:validation:Enum={"OpenShiftSDN","OVNKubernetes"} + // +optional + NetworkType string `json:"networkType,omitempty"` + + // MTU contains the MTU migration configuration. + // +optional + MTU *MTUMigration `json:"mtu,omitempty"` +} + +// MTUMigration contains infomation about MTU migration. +type MTUMigration struct { + // Network contains MTU migration configuration for the default network. + // +optional + Network *MTUMigrationValues `json:"network,omitempty"` + + // Machine contains MTU migration configuration for the machine's uplink. + // +optional + Machine *MTUMigrationValues `json:"machine,omitempty"` +} + +// MTUMigrationValues contains the values for a MTU migration. +type MTUMigrationValues struct { + // To is the MTU to migrate to. + // +kubebuilder:validation:Minimum=0 + To *uint32 `json:"to"` + + // From is the MTU to migrate from. + // +kubebuilder:validation:Minimum=0 + // +optional + From *uint32 `json:"from,omitempty"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_oauth.go b/vendor/github.com/openshift/api/config/v1/types_oauth.go new file mode 100644 index 000000000..02fbbf9d4 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_oauth.go @@ -0,0 +1,585 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// OAuth Server and Identity Provider Config + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`. +// It is used to configure the integrated OAuth server. +// This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OAuth struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec OAuthSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status OAuthStatus `json:"status"` +} + +// OAuthSpec contains desired cluster auth configuration +type OAuthSpec struct { + // identityProviders is an ordered list of ways for a user to identify themselves. + // When this list is empty, no identities are provisioned for users. + // +optional + // +listType=atomic + IdentityProviders []IdentityProvider `json:"identityProviders,omitempty"` + + // tokenConfig contains options for authorization and access tokens + TokenConfig TokenConfig `json:"tokenConfig"` + + // templates allow you to customize pages like the login page. + // +optional + Templates OAuthTemplates `json:"templates"` +} + +// OAuthStatus shows current known state of OAuth server in the cluster +type OAuthStatus struct { + // TODO Fill in with status of identityProviders and templates (and maybe tokenConfig) +} + +// TokenConfig holds the necessary configuration options for authorization and access tokens +type TokenConfig struct { + // accessTokenMaxAgeSeconds defines the maximum age of access tokens + AccessTokenMaxAgeSeconds int32 `json:"accessTokenMaxAgeSeconds,omitempty"` + + // accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect. 
+ // +optional + AccessTokenInactivityTimeoutSeconds int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty"` + + // accessTokenInactivityTimeout defines the token inactivity timeout + // for tokens granted by any client. + // The value represents the maximum amount of time that can occur between + // consecutive uses of the token. Tokens become invalid if they are not + // used within this temporal window. The user will need to acquire a new + // token to regain access once a token times out. Takes valid time + // duration string such as "5m", "1.5h" or "2h45m". The minimum allowed + // value for duration is 300s (5 minutes). If the timeout is configured + // per client, then that value takes precedence. If the timeout value is + // not specified and the client does not override the value, then tokens + // are valid until their lifetime. + // + // WARNING: existing tokens' timeout will not be affected (lowered) by changing this value + // +optional + AccessTokenInactivityTimeout *metav1.Duration `json:"accessTokenInactivityTimeout,omitempty"` +} + +const ( + // LoginTemplateKey is the key of the login template in a secret + LoginTemplateKey = "login.html" + + // ProviderSelectionTemplateKey is the key for the provider selection template in a secret + ProviderSelectionTemplateKey = "providers.html" + + // ErrorsTemplateKey is the key for the errors template in a secret + ErrorsTemplateKey = "errors.html" + + // BindPasswordKey is the key for the LDAP bind password in a secret + BindPasswordKey = "bindPassword" + + // ClientSecretKey is the key for the oauth client secret data in a secret + ClientSecretKey = "clientSecret" + + // HTPasswdDataKey is the key for the htpasswd file data in a secret + HTPasswdDataKey = "htpasswd" +) + +// OAuthTemplates allow for customization of pages like the login page +type OAuthTemplates struct { + // login is the name of a secret that specifies a go template to use to render the login page. + // The key "login.html" is used to locate the template data. + // If specified and the secret or expected key is not found, the default login page is used. + // If the specified template is not valid, the default login page is used. + // If unspecified, the default login page is used. + // The namespace for this secret is openshift-config. + // +optional + Login SecretNameReference `json:"login"` + + // providerSelection is the name of a secret that specifies a go template to use to render + // the provider selection page. + // The key "providers.html" is used to locate the template data. + // If specified and the secret or expected key is not found, the default provider selection page is used. + // If the specified template is not valid, the default provider selection page is used. + // If unspecified, the default provider selection page is used. + // The namespace for this secret is openshift-config. + // +optional + ProviderSelection SecretNameReference `json:"providerSelection"` + + // error is the name of a secret that specifies a go template to use to render error pages + // during the authentication or grant flow. + // The key "errors.html" is used to locate the template data. + // If specified and the secret or expected key is not found, the default error page is used. + // If the specified template is not valid, the default error page is used. + // If unspecified, the default error page is used. + // The namespace for this secret is openshift-config. 
+ // +optional + Error SecretNameReference `json:"error"` +} + +// IdentityProvider provides identities for users authenticating using credentials +type IdentityProvider struct { + // name is used to qualify the identities returned by this provider. + // - It MUST be unique and not shared by any other identity provider used + // - It MUST be a valid path segment: name cannot equal "." or ".." or contain "/" or "%" or ":" + // Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName + Name string `json:"name"` + + // mappingMethod determines how identities from this provider are mapped to users + // Defaults to "claim" + // +optional + MappingMethod MappingMethodType `json:"mappingMethod,omitempty"` + + IdentityProviderConfig `json:",inline"` +} + +// MappingMethodType specifies how new identities should be mapped to users when they log in +type MappingMethodType string + +const ( + // MappingMethodClaim provisions a user with the identity’s preferred user name. Fails if a user + // with that user name is already mapped to another identity. + // Default. + MappingMethodClaim MappingMethodType = "claim" + + // MappingMethodLookup looks up existing users already mapped to an identity but does not + // automatically provision users or identities. Requires identities and users be set up + // manually or using an external process. + MappingMethodLookup MappingMethodType = "lookup" + + // MappingMethodAdd provisions a user with the identity’s preferred user name. If a user with + // that user name already exists, the identity is mapped to the existing user, adding to any + // existing identity mappings for the user. + MappingMethodAdd MappingMethodType = "add" +) + +type IdentityProviderType string + +const ( + // IdentityProviderTypeBasicAuth provides identities for users authenticating with HTTP Basic Auth + IdentityProviderTypeBasicAuth IdentityProviderType = "BasicAuth" + + // IdentityProviderTypeGitHub provides identities for users authenticating using GitHub credentials + IdentityProviderTypeGitHub IdentityProviderType = "GitHub" + + // IdentityProviderTypeGitLab provides identities for users authenticating using GitLab credentials + IdentityProviderTypeGitLab IdentityProviderType = "GitLab" + + // IdentityProviderTypeGoogle provides identities for users authenticating using Google credentials + IdentityProviderTypeGoogle IdentityProviderType = "Google" + + // IdentityProviderTypeHTPasswd provides identities from an HTPasswd file + IdentityProviderTypeHTPasswd IdentityProviderType = "HTPasswd" + + // IdentityProviderTypeKeystone provides identitities for users authenticating using keystone password credentials + IdentityProviderTypeKeystone IdentityProviderType = "Keystone" + + // IdentityProviderTypeLDAP provides identities for users authenticating using LDAP credentials + IdentityProviderTypeLDAP IdentityProviderType = "LDAP" + + // IdentityProviderTypeOpenID provides identities for users authenticating using OpenID credentials + IdentityProviderTypeOpenID IdentityProviderType = "OpenID" + + // IdentityProviderTypeRequestHeader provides identities for users authenticating using request header credentials + IdentityProviderTypeRequestHeader IdentityProviderType = "RequestHeader" +) + +// IdentityProviderConfig contains configuration for using a specific identity provider +type IdentityProviderConfig struct { + // type identifies the identity provider type for this entry. 
+ Type IdentityProviderType `json:"type"` + + // Provider-specific configuration + // The json tag MUST match the `Type` specified above, case-insensitively + // e.g. For `Type: "LDAP"`, the `ldap` configuration should be provided + + // basicAuth contains configuration options for the BasicAuth IdP + // +optional + BasicAuth *BasicAuthIdentityProvider `json:"basicAuth,omitempty"` + + // github enables user authentication using GitHub credentials + // +optional + GitHub *GitHubIdentityProvider `json:"github,omitempty"` + + // gitlab enables user authentication using GitLab credentials + // +optional + GitLab *GitLabIdentityProvider `json:"gitlab,omitempty"` + + // google enables user authentication using Google credentials + // +optional + Google *GoogleIdentityProvider `json:"google,omitempty"` + + // htpasswd enables user authentication using an HTPasswd file to validate credentials + // +optional + HTPasswd *HTPasswdIdentityProvider `json:"htpasswd,omitempty"` + + // keystone enables user authentication using keystone password credentials + // +optional + Keystone *KeystoneIdentityProvider `json:"keystone,omitempty"` + + // ldap enables user authentication using LDAP credentials + // +optional + LDAP *LDAPIdentityProvider `json:"ldap,omitempty"` + + // openID enables user authentication using OpenID credentials + // +optional + OpenID *OpenIDIdentityProvider `json:"openID,omitempty"` + + // requestHeader enables user authentication using request header credentials + // +optional + RequestHeader *RequestHeaderIdentityProvider `json:"requestHeader,omitempty"` +} + +// BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials +type BasicAuthIdentityProvider struct { + // OAuthRemoteConnectionInfo contains information about how to connect to the external basic auth server + OAuthRemoteConnectionInfo `json:",inline"` +} + +// OAuthRemoteConnectionInfo holds information necessary for establishing a remote connection +type OAuthRemoteConnectionInfo struct { + // url is the remote URL to connect to + URL string `json:"url"` + + // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. + // It is used as a trust anchor to validate the TLS certificate presented by the remote server. + // The key "ca.crt" is used to locate the data. + // If specified and the config map or expected key is not found, the identity provider is not honored. + // If the specified ca data is not valid, the identity provider is not honored. + // If empty, the default system roots are used. + // The namespace for this config map is openshift-config. + // +optional + CA ConfigMapNameReference `json:"ca"` + + // tlsClientCert is an optional reference to a secret by name that contains the + // PEM-encoded TLS client certificate to present when connecting to the server. + // The key "tls.crt" is used to locate the data. + // If specified and the secret or expected key is not found, the identity provider is not honored. + // If the specified certificate data is not valid, the identity provider is not honored. + // The namespace for this secret is openshift-config. + // +optional + TLSClientCert SecretNameReference `json:"tlsClientCert"` + + // tlsClientKey is an optional reference to a secret by name that contains the + // PEM-encoded TLS private key for the client certificate referenced in tlsClientCert. + // The key "tls.key" is used to locate the data. 
+ // If specified and the secret or expected key is not found, the identity provider is not honored. + // If the specified certificate data is not valid, the identity provider is not honored. + // The namespace for this secret is openshift-config. + // +optional + TLSClientKey SecretNameReference `json:"tlsClientKey"` +} + +// HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials +type HTPasswdIdentityProvider struct { + // fileData is a required reference to a secret by name containing the data to use as the htpasswd file. + // The key "htpasswd" is used to locate the data. + // If the secret or expected key is not found, the identity provider is not honored. + // If the specified htpasswd data is not valid, the identity provider is not honored. + // The namespace for this secret is openshift-config. + FileData SecretNameReference `json:"fileData"` +} + +// LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials +type LDAPIdentityProvider struct { + // url is an RFC 2255 URL which specifies the LDAP search parameters to use. + // The syntax of the URL is: + // ldap://host:port/basedn?attribute?scope?filter + URL string `json:"url"` + + // bindDN is an optional DN to bind with during the search phase. + // +optional + BindDN string `json:"bindDN"` + + // bindPassword is an optional reference to a secret by name + // containing a password to bind with during the search phase. + // The key "bindPassword" is used to locate the data. + // If specified and the secret or expected key is not found, the identity provider is not honored. + // The namespace for this secret is openshift-config. + // +optional + BindPassword SecretNameReference `json:"bindPassword"` + + // insecure, if true, indicates the connection should not use TLS + // WARNING: Should not be set to `true` with the URL scheme "ldaps://" as "ldaps://" URLs always + // attempt to connect using TLS, even when `insecure` is set to `true` + // When `true`, "ldap://" URLS connect insecurely. When `false`, "ldap://" URLs are upgraded to + // a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830. + Insecure bool `json:"insecure"` + + // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. + // It is used as a trust anchor to validate the TLS certificate presented by the remote server. + // The key "ca.crt" is used to locate the data. + // If specified and the config map or expected key is not found, the identity provider is not honored. + // If the specified ca data is not valid, the identity provider is not honored. + // If empty, the default system roots are used. + // The namespace for this config map is openshift-config. + // +optional + CA ConfigMapNameReference `json:"ca"` + + // attributes maps LDAP attributes to identities + Attributes LDAPAttributeMapping `json:"attributes"` +} + +// LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields +type LDAPAttributeMapping struct { + // id is the list of attributes whose values should be used as the user ID. Required. + // First non-empty attribute is used. At least one attribute is required. If none of the listed + // attribute have a value, authentication fails. + // LDAP standard identity attribute is "dn" + ID []string `json:"id"` + + // preferredUsername is the list of attributes whose values should be used as the preferred username. 
+ // LDAP standard login attribute is "uid" + // +optional + PreferredUsername []string `json:"preferredUsername,omitempty"` + + // name is the list of attributes whose values should be used as the display name. Optional. + // If unspecified, no display name is set for the identity + // LDAP standard display name attribute is "cn" + // +optional + Name []string `json:"name,omitempty"` + + // email is the list of attributes whose values should be used as the email address. Optional. + // If unspecified, no email is set for the identity + // +optional + Email []string `json:"email,omitempty"` +} + +// KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials +type KeystoneIdentityProvider struct { + // OAuthRemoteConnectionInfo contains information about how to connect to the keystone server + OAuthRemoteConnectionInfo `json:",inline"` + + // domainName is required for keystone v3 + DomainName string `json:"domainName"` + + // TODO if we ever add support for 3.11 to 4.0 upgrades, add this configuration + // useUsernameIdentity indicates that users should be authenticated by username, not keystone ID + // DEPRECATED - only use this option for legacy systems to ensure backwards compatibility + // +optional + // UseUsernameIdentity bool `json:"useUsernameIdentity"` +} + +// RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials +type RequestHeaderIdentityProvider struct { + // loginURL is a URL to redirect unauthenticated /authorize requests to + // Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here + // ${url} is replaced with the current URL, escaped to be safe in a query parameter + // https://www.example.com/sso-login?then=${url} + // ${query} is replaced with the current query string + // https://www.example.com/auth-proxy/oauth/authorize?${query} + // Required when login is set to true. + LoginURL string `json:"loginURL"` + + // challengeURL is a URL to redirect unauthenticated /authorize requests to + // Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be + // redirected here. + // ${url} is replaced with the current URL, escaped to be safe in a query parameter + // https://www.example.com/sso-login?then=${url} + // ${query} is replaced with the current query string + // https://www.example.com/auth-proxy/oauth/authorize?${query} + // Required when challenge is set to true. + ChallengeURL string `json:"challengeURL"` + + // ca is a required reference to a config map by name containing the PEM-encoded CA bundle. + // It is used as a trust anchor to validate the TLS certificate presented by the remote server. + // Specifically, it allows verification of incoming requests to prevent header spoofing. + // The key "ca.crt" is used to locate the data. + // If the config map or expected key is not found, the identity provider is not honored. + // If the specified ca data is not valid, the identity provider is not honored. + // The namespace for this config map is openshift-config. + ClientCA ConfigMapNameReference `json:"ca"` + + // clientCommonNames is an optional list of common names to require a match from. If empty, any + // client certificate validated against the clientCA bundle is considered authoritative. 
+ // +optional + ClientCommonNames []string `json:"clientCommonNames,omitempty"` + + // headers is the set of headers to check for identity information + Headers []string `json:"headers"` + + // preferredUsernameHeaders is the set of headers to check for the preferred username + PreferredUsernameHeaders []string `json:"preferredUsernameHeaders"` + + // nameHeaders is the set of headers to check for the display name + NameHeaders []string `json:"nameHeaders"` + + // emailHeaders is the set of headers to check for the email address + EmailHeaders []string `json:"emailHeaders"` +} + +// GitHubIdentityProvider provides identities for users authenticating using GitHub credentials +type GitHubIdentityProvider struct { + // clientID is the oauth client ID + ClientID string `json:"clientID"` + + // clientSecret is a required reference to the secret by name containing the oauth client secret. + // The key "clientSecret" is used to locate the data. + // If the secret or expected key is not found, the identity provider is not honored. + // The namespace for this secret is openshift-config. + ClientSecret SecretNameReference `json:"clientSecret"` + + // organizations optionally restricts which organizations are allowed to log in + // +optional + Organizations []string `json:"organizations,omitempty"` + + // teams optionally restricts which teams are allowed to log in. Format is /. + // +optional + Teams []string `json:"teams,omitempty"` + + // hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of + // GitHub Enterprise. + // It must match the GitHub Enterprise settings value configured at /setup/settings#hostname. + // +optional + Hostname string `json:"hostname"` + + // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. + // It is used as a trust anchor to validate the TLS certificate presented by the remote server. + // The key "ca.crt" is used to locate the data. + // If specified and the config map or expected key is not found, the identity provider is not honored. + // If the specified ca data is not valid, the identity provider is not honored. + // If empty, the default system roots are used. + // This can only be configured when hostname is set to a non-empty value. + // The namespace for this config map is openshift-config. + // +optional + CA ConfigMapNameReference `json:"ca"` +} + +// GitLabIdentityProvider provides identities for users authenticating using GitLab credentials +type GitLabIdentityProvider struct { + // clientID is the oauth client ID + ClientID string `json:"clientID"` + + // clientSecret is a required reference to the secret by name containing the oauth client secret. + // The key "clientSecret" is used to locate the data. + // If the secret or expected key is not found, the identity provider is not honored. + // The namespace for this secret is openshift-config. + ClientSecret SecretNameReference `json:"clientSecret"` + + // url is the oauth server base URL + URL string `json:"url"` + + // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. + // It is used as a trust anchor to validate the TLS certificate presented by the remote server. + // The key "ca.crt" is used to locate the data. + // If specified and the config map or expected key is not found, the identity provider is not honored. + // If the specified ca data is not valid, the identity provider is not honored. + // If empty, the default system roots are used. 
+ // The namespace for this config map is openshift-config. + // +optional + CA ConfigMapNameReference `json:"ca"` +} + +// GoogleIdentityProvider provides identities for users authenticating using Google credentials +type GoogleIdentityProvider struct { + // clientID is the oauth client ID + ClientID string `json:"clientID"` + + // clientSecret is a required reference to the secret by name containing the oauth client secret. + // The key "clientSecret" is used to locate the data. + // If the secret or expected key is not found, the identity provider is not honored. + // The namespace for this secret is openshift-config. + ClientSecret SecretNameReference `json:"clientSecret"` + + // hostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to + // +optional + HostedDomain string `json:"hostedDomain"` +} + +// OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials +type OpenIDIdentityProvider struct { + // clientID is the oauth client ID + ClientID string `json:"clientID"` + + // clientSecret is a required reference to the secret by name containing the oauth client secret. + // The key "clientSecret" is used to locate the data. + // If the secret or expected key is not found, the identity provider is not honored. + // The namespace for this secret is openshift-config. + ClientSecret SecretNameReference `json:"clientSecret"` + + // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. + // It is used as a trust anchor to validate the TLS certificate presented by the remote server. + // The key "ca.crt" is used to locate the data. + // If specified and the config map or expected key is not found, the identity provider is not honored. + // If the specified ca data is not valid, the identity provider is not honored. + // If empty, the default system roots are used. + // The namespace for this config map is openshift-config. + // +optional + CA ConfigMapNameReference `json:"ca"` + + // extraScopes are any scopes to request in addition to the standard "openid" scope. + // +optional + ExtraScopes []string `json:"extraScopes,omitempty"` + + // extraAuthorizeParameters are any custom parameters to add to the authorize request. + // +optional + ExtraAuthorizeParameters map[string]string `json:"extraAuthorizeParameters,omitempty"` + + // issuer is the URL that the OpenID Provider asserts as its Issuer Identifier. + // It must use the https scheme with no query or fragment component. + Issuer string `json:"issuer"` + + // claims mappings + Claims OpenIDClaims `json:"claims"` +} + +// UserIDClaim is the claim used to provide a stable identifier for OIDC identities. +// Per http://openid.net/specs/openid-connect-core-1_0.html#ClaimStability +// "The sub (subject) and iss (issuer) Claims, used together, are the only Claims that an RP can +// rely upon as a stable identifier for the End-User, since the sub Claim MUST be locally unique +// and never reassigned within the Issuer for a particular End-User, as described in Section 2. +// Therefore, the only guaranteed unique identifier for a given End-User is the combination of the +// iss Claim and the sub Claim." 
+const UserIDClaim = "sub" + +// OpenIDClaim represents a claim retrieved from an OpenID provider's tokens or userInfo +// responses +// +kubebuilder:validation:MinLength=1 +type OpenIDClaim string + +// OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider +type OpenIDClaims struct { + // preferredUsername is the list of claims whose values should be used as the preferred username. + // If unspecified, the preferred username is determined from the value of the sub claim + // +listType=atomic + // +optional + PreferredUsername []string `json:"preferredUsername,omitempty"` + + // name is the list of claims whose values should be used as the display name. Optional. + // If unspecified, no display name is set for the identity + // +listType=atomic + // +optional + Name []string `json:"name,omitempty"` + + // email is the list of claims whose values should be used as the email address. Optional. + // If unspecified, no email is set for the identity + // +listType=atomic + // +optional + Email []string `json:"email,omitempty"` + + // groups is the list of claims value of which should be used to synchronize groups + // from the OIDC provider to OpenShift for the user. + // If multiple claims are specified, the first one with a non-empty value is used. + // +listType=atomic + // +optional + Groups []OpenIDClaim `json:"groups,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OAuthList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []OAuth `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go new file mode 100644 index 000000000..67a029529 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go @@ -0,0 +1,85 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// OperatorHubSpec defines the desired state of OperatorHub +type OperatorHubSpec struct { + // disableAllDefaultSources allows you to disable all the default hub + // sources. If this is true, a specific entry in sources can be used to + // enable a default source. If this is false, a specific entry in + // sources can be used to disable or enable a default source. + // +optional + DisableAllDefaultSources bool `json:"disableAllDefaultSources,omitempty"` + // sources is the list of default hub sources and their configuration. + // If the list is empty, it implies that the default hub sources are + // enabled on the cluster unless disableAllDefaultSources is true. + // If disableAllDefaultSources is true and sources is not empty, + // the configuration present in sources will take precedence. The list of + // default hub sources and their current state will always be reflected in + // the status block. + // +optional + Sources []HubSource `json:"sources,omitempty"` +} + +// OperatorHubStatus defines the observed state of OperatorHub. The current +// state of the default hub sources will always be reflected here. 
+type OperatorHubStatus struct { + // sources encapsulates the result of applying the configuration for each + // hub source + Sources []HubSourceStatus `json:"sources,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OperatorHub is the Schema for the operatorhubs API. It can be used to change +// the state of the default hub sources for OperatorHub on the cluster from +// enabled to disabled and vice versa. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +kubebuilder:subresource:status +// +genclient +// +genclient:nonNamespaced +// +openshift:compatibility-gen:level=1 +type OperatorHub struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec OperatorHubSpec `json:"spec"` + Status OperatorHubStatus `json:"status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OperatorHubList contains a list of OperatorHub +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OperatorHubList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []OperatorHub `json:"items"` +} + +// HubSource is used to specify the hub source and its configuration +type HubSource struct { + // name is the name of one of the default hub sources + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:Required + Name string `json:"name"` + // disabled is used to disable a default hub source on cluster + // +kubebuilder:Required + Disabled bool `json:"disabled"` +} + +// HubSourceStatus is used to reflect the current state of applying the +// configuration to a default source +type HubSourceStatus struct { + HubSource `json:",omitempty"` + // status indicates success or failure in applying the configuration + Status string `json:"status,omitempty"` + // message provides more information regarding failures + Message string `json:"message,omitempty"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_project.go b/vendor/github.com/openshift/api/config/v1/types_project.go new file mode 100644 index 000000000..add6abf66 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_project.go @@ -0,0 +1,59 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Project holds cluster-wide information about Project. The canonical name is `cluster` +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Project struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec ProjectSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status ProjectStatus `json:"status"` +} + +// TemplateReference references a template in a specific namespace. +// The namespace must be specified at the point of use. 
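A minimal sketch of a project-creation configuration using the fields defined below (names are illustrative; the referenced template must live in the openshift-config namespace):

spec := ProjectSpec{
	ProjectRequestMessage:  "Contact the platform team to request a project.",
	ProjectRequestTemplate: TemplateReference{Name: "project-request"},
}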
+type TemplateReference struct { + // name is the metadata.name of the referenced project request template + Name string `json:"name"` +} + +// ProjectSpec holds the project creation configuration. +type ProjectSpec struct { + // projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint + // +optional + ProjectRequestMessage string `json:"projectRequestMessage"` + + // projectRequestTemplate is the template to use for creating projects in response to projectrequest. + // This must point to a template in 'openshift-config' namespace. It is optional. + // If it is not specified, a default template is used. + // + // +optional + ProjectRequestTemplate TemplateReference `json:"projectRequestTemplate"` +} + +type ProjectStatus struct { +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ProjectList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Project `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_proxy.go b/vendor/github.com/openshift/api/config/v1/types_proxy.go new file mode 100644 index 000000000..01ee4690d --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_proxy.go @@ -0,0 +1,99 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster` +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Proxy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // Spec holds user-settable values for the proxy configuration + // +kubebuilder:validation:Required + // +required + Spec ProxySpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status ProxyStatus `json:"status"` +} + +// ProxySpec contains cluster proxy creation configuration. +type ProxySpec struct { + // httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var. + // +optional + HTTPProxy string `json:"httpProxy,omitempty"` + + // httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var. + // +optional + HTTPSProxy string `json:"httpsProxy,omitempty"` + + // noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. + // Empty means unset and will not result in an env var. + // +optional + NoProxy string `json:"noProxy,omitempty"` + + // readinessEndpoints is a list of endpoints used to verify readiness of the proxy. + // +optional + ReadinessEndpoints []string `json:"readinessEndpoints,omitempty"` + + // trustedCA is a reference to a ConfigMap containing a CA certificate bundle. + // The trustedCA field should only be consumed by a proxy validator. 
The + // validator is responsible for reading the certificate bundle from the required + // key "ca-bundle.crt", merging it with the system default trust bundle, + // and writing the merged trust bundle to a ConfigMap named "trusted-ca-bundle" + // in the "openshift-config-managed" namespace. Clients that expect to make + // proxy connections must use the trusted-ca-bundle for all HTTPS requests to + // the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as + // well. + // + // The namespace for the ConfigMap referenced by trustedCA is + // "openshift-config". Here is an example ConfigMap (in yaml): + // + // apiVersion: v1 + // kind: ConfigMap + // metadata: + // name: user-ca-bundle + // namespace: openshift-config + // data: + // ca-bundle.crt: | + // -----BEGIN CERTIFICATE----- + // Custom CA certificate bundle. + // -----END CERTIFICATE----- + // + // +optional + TrustedCA ConfigMapNameReference `json:"trustedCA,omitempty"` +} + +// ProxyStatus shows current known state of the cluster proxy. +type ProxyStatus struct { + // httpProxy is the URL of the proxy for HTTP requests. + // +optional + HTTPProxy string `json:"httpProxy,omitempty"` + + // httpsProxy is the URL of the proxy for HTTPS requests. + // +optional + HTTPSProxy string `json:"httpsProxy,omitempty"` + + // noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. + // +optional + NoProxy string `json:"noProxy,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ProxyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Proxy `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_scheduling.go b/vendor/github.com/openshift/api/config/v1/types_scheduling.go new file mode 100644 index 000000000..a69d2a35c --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_scheduling.go @@ -0,0 +1,105 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Scheduler holds cluster-wide config information to run the Kubernetes Scheduler +// and influence its placement decisions. The canonical name for this config is `cluster`. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Scheduler struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec SchedulerSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status SchedulerStatus `json:"status"` +} + +type SchedulerSpec struct { + // DEPRECATED: the scheduler Policy API has been deprecated and will be removed in a future release. + // policy is a reference to a ConfigMap containing scheduler policy which has + // user specified predicates and priorities. If this ConfigMap is not available + // scheduler will default to use DefaultAlgorithmProvider. + // The namespace for this configmap is openshift-config. 
+	// +optional
+	Policy ConfigMapNameReference `json:"policy,omitempty"`
+	// profile sets which scheduling profile should be set in order to configure scheduling
+	// decisions for new pods.
+	//
+	// Valid values are "LowNodeUtilization", "HighNodeUtilization", "NoScoring"
+	// Defaults to "LowNodeUtilization"
+	// +optional
+	Profile SchedulerProfile `json:"profile,omitempty"`
+	// defaultNodeSelector helps set the cluster-wide default node selector to
+	// restrict pod placement to specific nodes. This is applied to the pods
+	// created in all namespaces and creates an intersection with any existing
+	// nodeSelectors already set on a pod, additionally constraining that pod's selector.
+	// For example,
+	// defaultNodeSelector: "type=user-node,region=east" would set nodeSelector
+	// field in pod spec to "type=user-node,region=east" to all pods created
+	// in all namespaces. Namespaces having project-wide node selectors won't be
+	// impacted even if this field is set. This adds an annotation section to
+	// the namespace.
+	// For example, if a new namespace is created with
+	// node-selector='type=user-node,region=east',
+	// the annotation openshift.io/node-selector: type=user-node,region=east
+	// gets added to the project. When the openshift.io/node-selector annotation
+	// is set on the project the value is used in preference to the value we are setting
+	// for defaultNodeSelector field.
+	// For instance,
+	// openshift.io/node-selector: "type=user-node,region=west" means
+	// that the default of "type=user-node,region=east" set in defaultNodeSelector
+	// would not be applied.
+	// +optional
+	DefaultNodeSelector string `json:"defaultNodeSelector,omitempty"`
+	// MastersSchedulable allows master nodes to be schedulable. When this flag is
+	// turned on, all the master nodes in the cluster will be made schedulable,
+	// so that workload pods can run on them. The default value for this field is false,
+	// meaning none of the master nodes are schedulable.
+	// Important Note: Once the workload pods start running on the master nodes,
+	// extreme care must be taken to ensure that cluster-critical control plane components
+	// are not impacted.
+	// Please turn on this field after doing due diligence.
+	// +optional
+	MastersSchedulable bool `json:"mastersSchedulable"`
+}
+
+// +kubebuilder:validation:Enum="";LowNodeUtilization;HighNodeUtilization;NoScoring
+type SchedulerProfile string
+
+var (
+	// LowNodeUtilization is the default, and defines a scheduling profile which prefers to
+	// spread pods evenly among nodes targeting low resource consumption on each node.
+	LowNodeUtilization SchedulerProfile = "LowNodeUtilization"
+
+	// HighNodeUtilization defines a scheduling profile which packs as many pods as possible onto
+	// as few nodes as possible targeting a small node count but high resource usage on each node.
+	HighNodeUtilization SchedulerProfile = "HighNodeUtilization"
+
+	// NoScoring defines a scheduling profile which tries to provide lower-latency scheduling
+	// at the expense of potentially less optimal pod placement decisions.
+	NoScoring SchedulerProfile = "NoScoring"
+)
+
+type SchedulerStatus struct {
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
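A minimal sketch tying the scheduler fields above together (the selector string is illustrative):

spec := SchedulerSpec{
	Profile:             HighNodeUtilization,          // pack pods onto fewer nodes
	DefaultNodeSelector: "type=user-node,region=east", // intersected with per-pod nodeSelectors
	MastersSchedulable:  false,                        // keep control-plane nodes unschedulable
}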
+// +openshift:compatibility-gen:level=1 +type SchedulerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []Scheduler `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go new file mode 100644 index 000000000..9dbacb996 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go @@ -0,0 +1,262 @@ +package v1 + +// TLSSecurityProfile defines the schema for a TLS security profile. This object +// is used by operators to apply TLS security settings to operands. +// +union +type TLSSecurityProfile struct { + // type is one of Old, Intermediate, Modern or Custom. Custom provides + // the ability to specify individual TLS security profile parameters. + // Old, Intermediate and Modern are TLS security profiles based on: + // + // https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations + // + // The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers + // are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be + // reduced. + // + // Note that the Modern profile is currently not supported because it is not + // yet well adopted by common software libraries. + // + // +unionDiscriminator + // +optional + Type TLSProfileType `json:"type"` + // old is a TLS security profile based on: + // + // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility + // + // and looks like this (yaml): + // + // ciphers: + // - TLS_AES_128_GCM_SHA256 + // - TLS_AES_256_GCM_SHA384 + // - TLS_CHACHA20_POLY1305_SHA256 + // - ECDHE-ECDSA-AES128-GCM-SHA256 + // - ECDHE-RSA-AES128-GCM-SHA256 + // - ECDHE-ECDSA-AES256-GCM-SHA384 + // - ECDHE-RSA-AES256-GCM-SHA384 + // - ECDHE-ECDSA-CHACHA20-POLY1305 + // - ECDHE-RSA-CHACHA20-POLY1305 + // - DHE-RSA-AES128-GCM-SHA256 + // - DHE-RSA-AES256-GCM-SHA384 + // - DHE-RSA-CHACHA20-POLY1305 + // - ECDHE-ECDSA-AES128-SHA256 + // - ECDHE-RSA-AES128-SHA256 + // - ECDHE-ECDSA-AES128-SHA + // - ECDHE-RSA-AES128-SHA + // - ECDHE-ECDSA-AES256-SHA384 + // - ECDHE-RSA-AES256-SHA384 + // - ECDHE-ECDSA-AES256-SHA + // - ECDHE-RSA-AES256-SHA + // - DHE-RSA-AES128-SHA256 + // - DHE-RSA-AES256-SHA256 + // - AES128-GCM-SHA256 + // - AES256-GCM-SHA384 + // - AES128-SHA256 + // - AES256-SHA256 + // - AES128-SHA + // - AES256-SHA + // - DES-CBC3-SHA + // minTLSVersion: TLSv1.0 + // + // +optional + // +nullable + Old *OldTLSProfile `json:"old,omitempty"` + // intermediate is a TLS security profile based on: + // + // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 + // + // and looks like this (yaml): + // + // ciphers: + // - TLS_AES_128_GCM_SHA256 + // - TLS_AES_256_GCM_SHA384 + // - TLS_CHACHA20_POLY1305_SHA256 + // - ECDHE-ECDSA-AES128-GCM-SHA256 + // - ECDHE-RSA-AES128-GCM-SHA256 + // - ECDHE-ECDSA-AES256-GCM-SHA384 + // - ECDHE-RSA-AES256-GCM-SHA384 + // - ECDHE-ECDSA-CHACHA20-POLY1305 + // - ECDHE-RSA-CHACHA20-POLY1305 + // - DHE-RSA-AES128-GCM-SHA256 + // - DHE-RSA-AES256-GCM-SHA384 + // minTLSVersion: TLSv1.2 + // + // +optional + // +nullable + Intermediate *IntermediateTLSProfile `json:"intermediate,omitempty"` + // modern is a TLS security profile based on: + // + // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility + // + // and looks like this (yaml): + // + // ciphers: + // - 
TLS_AES_128_GCM_SHA256 + // - TLS_AES_256_GCM_SHA384 + // - TLS_CHACHA20_POLY1305_SHA256 + // minTLSVersion: TLSv1.3 + // + // NOTE: Currently unsupported. + // + // +optional + // +nullable + Modern *ModernTLSProfile `json:"modern,omitempty"` + // custom is a user-defined TLS security profile. Be extremely careful using a custom + // profile as invalid configurations can be catastrophic. An example custom profile + // looks like this: + // + // ciphers: + // - ECDHE-ECDSA-CHACHA20-POLY1305 + // - ECDHE-RSA-CHACHA20-POLY1305 + // - ECDHE-RSA-AES128-GCM-SHA256 + // - ECDHE-ECDSA-AES128-GCM-SHA256 + // minTLSVersion: TLSv1.1 + // + // +optional + // +nullable + Custom *CustomTLSProfile `json:"custom,omitempty"` +} + +// OldTLSProfile is a TLS security profile based on: +// https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility +type OldTLSProfile struct{} + +// IntermediateTLSProfile is a TLS security profile based on: +// https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29 +type IntermediateTLSProfile struct{} + +// ModernTLSProfile is a TLS security profile based on: +// https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility +type ModernTLSProfile struct{} + +// CustomTLSProfile is a user-defined TLS security profile. Be extremely careful +// using a custom TLS profile as invalid configurations can be catastrophic. +type CustomTLSProfile struct { + TLSProfileSpec `json:",inline"` +} + +// TLSProfileType defines a TLS security profile type. +// +kubebuilder:validation:Enum=Old;Intermediate;Modern;Custom +type TLSProfileType string + +const ( + // Old is a TLS security profile based on: + // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility + TLSProfileOldType TLSProfileType = "Old" + // Intermediate is a TLS security profile based on: + // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29 + TLSProfileIntermediateType TLSProfileType = "Intermediate" + // Modern is a TLS security profile based on: + // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility + TLSProfileModernType TLSProfileType = "Modern" + // Custom is a TLS security profile that allows for user-defined parameters. + TLSProfileCustomType TLSProfileType = "Custom" +) + +// TLSProfileSpec is the desired behavior of a TLSSecurityProfile. +type TLSProfileSpec struct { + // ciphers is used to specify the cipher algorithms that are negotiated + // during the TLS handshake. Operators may remove entries their operands + // do not support. For example, to use DES-CBC3-SHA (yaml): + // + // ciphers: + // - DES-CBC3-SHA + // + Ciphers []string `json:"ciphers"` + // minTLSVersion is used to specify the minimal version of the TLS protocol + // that is negotiated during the TLS handshake. For example, to use TLS + // versions 1.1, 1.2 and 1.3 (yaml): + // + // minTLSVersion: TLSv1.1 + // + // NOTE: currently the highest minTLSVersion allowed is VersionTLS12 + // + MinTLSVersion TLSProtocolVersion `json:"minTLSVersion"` +} + +// TLSProtocolVersion is a way to specify the protocol version used for TLS connections. 
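Because TLSSecurityProfile is a union, the discriminator and the matching member are set together. A minimal sketch of the two common shapes (cipher names are illustrative):

// Intent-based profile: the member struct is empty; the meaning lives in Type.
intermediate := TLSSecurityProfile{
	Type:         TLSProfileIntermediateType,
	Intermediate: &IntermediateTLSProfile{},
}

// Custom profile: the spec is spelled out inline via the embedded TLSProfileSpec.
custom := TLSSecurityProfile{
	Type: TLSProfileCustomType,
	Custom: &CustomTLSProfile{
		TLSProfileSpec: TLSProfileSpec{
			Ciphers:       []string{"ECDHE-ECDSA-CHACHA20-POLY1305", "ECDHE-RSA-AES128-GCM-SHA256"},
			MinTLSVersion: VersionTLS12,
		},
	},
}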
+// Protocol versions are based on the following most common TLS configurations: +// +// https://ssl-config.mozilla.org/ +// +// Note that SSLv3.0 is not a supported protocol version due to well known +// vulnerabilities such as POODLE: https://en.wikipedia.org/wiki/POODLE +// +kubebuilder:validation:Enum=VersionTLS10;VersionTLS11;VersionTLS12;VersionTLS13 +type TLSProtocolVersion string + +const ( + // VersionTLSv10 is version 1.0 of the TLS security protocol. + VersionTLS10 TLSProtocolVersion = "VersionTLS10" + // VersionTLSv11 is version 1.1 of the TLS security protocol. + VersionTLS11 TLSProtocolVersion = "VersionTLS11" + // VersionTLSv12 is version 1.2 of the TLS security protocol. + VersionTLS12 TLSProtocolVersion = "VersionTLS12" + // VersionTLSv13 is version 1.3 of the TLS security protocol. + VersionTLS13 TLSProtocolVersion = "VersionTLS13" +) + +// TLSProfiles Contains a map of TLSProfileType names to TLSProfileSpec. +// +// NOTE: The caller needs to make sure to check that these constants are valid for their binary. Not all +// entries map to values for all binaries. In the case of ties, the kube-apiserver wins. Do not fail, +// just be sure to whitelist only and everything will be ok. +var TLSProfiles = map[TLSProfileType]*TLSProfileSpec{ + TLSProfileOldType: { + Ciphers: []string{ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + "ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-RSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-AES256-GCM-SHA384", + "ECDHE-RSA-AES256-GCM-SHA384", + "ECDHE-ECDSA-CHACHA20-POLY1305", + "ECDHE-RSA-CHACHA20-POLY1305", + "DHE-RSA-AES128-GCM-SHA256", + "DHE-RSA-AES256-GCM-SHA384", + "DHE-RSA-CHACHA20-POLY1305", + "ECDHE-ECDSA-AES128-SHA256", + "ECDHE-RSA-AES128-SHA256", + "ECDHE-ECDSA-AES128-SHA", + "ECDHE-RSA-AES128-SHA", + "ECDHE-ECDSA-AES256-SHA384", + "ECDHE-RSA-AES256-SHA384", + "ECDHE-ECDSA-AES256-SHA", + "ECDHE-RSA-AES256-SHA", + "DHE-RSA-AES128-SHA256", + "DHE-RSA-AES256-SHA256", + "AES128-GCM-SHA256", + "AES256-GCM-SHA384", + "AES128-SHA256", + "AES256-SHA256", + "AES128-SHA", + "AES256-SHA", + "DES-CBC3-SHA", + }, + MinTLSVersion: VersionTLS10, + }, + TLSProfileIntermediateType: { + Ciphers: []string{ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + "ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-RSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-AES256-GCM-SHA384", + "ECDHE-RSA-AES256-GCM-SHA384", + "ECDHE-ECDSA-CHACHA20-POLY1305", + "ECDHE-RSA-CHACHA20-POLY1305", + "DHE-RSA-AES128-GCM-SHA256", + "DHE-RSA-AES256-GCM-SHA384", + }, + MinTLSVersion: VersionTLS12, + }, + TLSProfileModernType: { + Ciphers: []string{ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + }, + MinTLSVersion: VersionTLS13, + }, +} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..00ffa3233 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -0,0 +1,4366 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
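A sketch of how a consumer might resolve an effective spec from the TLSProfiles map above (defaulting a nil profile to Intermediate is an assumption of this sketch, not something the file mandates):

func resolveTLSProfile(p *TLSSecurityProfile) *TLSProfileSpec {
	if p == nil {
		return TLSProfiles[TLSProfileIntermediateType] // assumed default when unset
	}
	if p.Type == TLSProfileCustomType {
		if p.Custom == nil {
			return nil // malformed: Custom discriminator without a custom spec
		}
		spec := p.Custom.TLSProfileSpec // copy so callers cannot mutate the API object
		return &spec
	}
	return TLSProfiles[p.Type]
}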
+func (in *APIServer) DeepCopyInto(out *APIServer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServer. +func (in *APIServer) DeepCopy() *APIServer { + if in == nil { + return nil + } + out := new(APIServer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIServer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerEncryption) DeepCopyInto(out *APIServerEncryption) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerEncryption. +func (in *APIServerEncryption) DeepCopy() *APIServerEncryption { + if in == nil { + return nil + } + out := new(APIServerEncryption) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerList) DeepCopyInto(out *APIServerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]APIServer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerList. +func (in *APIServerList) DeepCopy() *APIServerList { + if in == nil { + return nil + } + out := new(APIServerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIServerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerNamedServingCert) DeepCopyInto(out *APIServerNamedServingCert) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.ServingCertificate = in.ServingCertificate + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerNamedServingCert. +func (in *APIServerNamedServingCert) DeepCopy() *APIServerNamedServingCert { + if in == nil { + return nil + } + out := new(APIServerNamedServingCert) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerServingCerts) DeepCopyInto(out *APIServerServingCerts) { + *out = *in + if in.NamedCertificates != nil { + in, out := &in.NamedCertificates, &out.NamedCertificates + *out = make([]APIServerNamedServingCert, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerServingCerts. 
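The generated deepcopy helpers exist so callers can mutate a copy without aliasing slices or maps held by the original object; a quick sketch:

original := &APIServerSpec{
	AdditionalCORSAllowedOrigins: []string{`//127\.0\.0\.1(:|$)`},
}
clone := original.DeepCopy()
clone.AdditionalCORSAllowedOrigins[0] = "changed"
// original.AdditionalCORSAllowedOrigins[0] still holds the old value:
// DeepCopy duplicated the backing array instead of sharing it.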
+func (in *APIServerServingCerts) DeepCopy() *APIServerServingCerts { + if in == nil { + return nil + } + out := new(APIServerServingCerts) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerSpec) DeepCopyInto(out *APIServerSpec) { + *out = *in + in.ServingCerts.DeepCopyInto(&out.ServingCerts) + out.ClientCA = in.ClientCA + if in.AdditionalCORSAllowedOrigins != nil { + in, out := &in.AdditionalCORSAllowedOrigins, &out.AdditionalCORSAllowedOrigins + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.Encryption = in.Encryption + if in.TLSSecurityProfile != nil { + in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile + *out = new(TLSSecurityProfile) + (*in).DeepCopyInto(*out) + } + in.Audit.DeepCopyInto(&out.Audit) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerSpec. +func (in *APIServerSpec) DeepCopy() *APIServerSpec { + if in == nil { + return nil + } + out := new(APIServerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerStatus) DeepCopyInto(out *APIServerStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerStatus. +func (in *APIServerStatus) DeepCopy() *APIServerStatus { + if in == nil { + return nil + } + out := new(APIServerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSPlatformSpec) DeepCopyInto(out *AWSPlatformSpec) { + *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]AWSServiceEndpoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformSpec. +func (in *AWSPlatformSpec) DeepCopy() *AWSPlatformSpec { + if in == nil { + return nil + } + out := new(AWSPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSPlatformStatus) DeepCopyInto(out *AWSPlatformStatus) { + *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]AWSServiceEndpoint, len(*in)) + copy(*out, *in) + } + if in.ResourceTags != nil { + in, out := &in.ResourceTags, &out.ResourceTags + *out = make([]AWSResourceTag, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformStatus. +func (in *AWSPlatformStatus) DeepCopy() *AWSPlatformStatus { + if in == nil { + return nil + } + out := new(AWSPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSResourceTag) DeepCopyInto(out *AWSResourceTag) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceTag. 
+func (in *AWSResourceTag) DeepCopy() *AWSResourceTag { + if in == nil { + return nil + } + out := new(AWSResourceTag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSServiceEndpoint) DeepCopyInto(out *AWSServiceEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSServiceEndpoint. +func (in *AWSServiceEndpoint) DeepCopy() *AWSServiceEndpoint { + if in == nil { + return nil + } + out := new(AWSServiceEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionConfig) DeepCopyInto(out *AdmissionConfig) { + *out = *in + if in.PluginConfig != nil { + in, out := &in.PluginConfig, &out.PluginConfig + *out = make(map[string]AdmissionPluginConfig, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.EnabledAdmissionPlugins != nil { + in, out := &in.EnabledAdmissionPlugins, &out.EnabledAdmissionPlugins + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DisabledAdmissionPlugins != nil { + in, out := &in.DisabledAdmissionPlugins, &out.DisabledAdmissionPlugins + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionConfig. +func (in *AdmissionConfig) DeepCopy() *AdmissionConfig { + if in == nil { + return nil + } + out := new(AdmissionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionPluginConfig) DeepCopyInto(out *AdmissionPluginConfig) { + *out = *in + in.Configuration.DeepCopyInto(&out.Configuration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPluginConfig. +func (in *AdmissionPluginConfig) DeepCopy() *AdmissionPluginConfig { + if in == nil { + return nil + } + out := new(AdmissionPluginConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlibabaCloudPlatformSpec) DeepCopyInto(out *AlibabaCloudPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudPlatformSpec. +func (in *AlibabaCloudPlatformSpec) DeepCopy() *AlibabaCloudPlatformSpec { + if in == nil { + return nil + } + out := new(AlibabaCloudPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlibabaCloudPlatformStatus) DeepCopyInto(out *AlibabaCloudPlatformStatus) { + *out = *in + if in.ResourceTags != nil { + in, out := &in.ResourceTags, &out.ResourceTags + *out = make([]AlibabaCloudResourceTag, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudPlatformStatus. 
+func (in *AlibabaCloudPlatformStatus) DeepCopy() *AlibabaCloudPlatformStatus { + if in == nil { + return nil + } + out := new(AlibabaCloudPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlibabaCloudResourceTag) DeepCopyInto(out *AlibabaCloudResourceTag) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudResourceTag. +func (in *AlibabaCloudResourceTag) DeepCopy() *AlibabaCloudResourceTag { + if in == nil { + return nil + } + out := new(AlibabaCloudResourceTag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Audit) DeepCopyInto(out *Audit) { + *out = *in + if in.CustomRules != nil { + in, out := &in.CustomRules, &out.CustomRules + *out = make([]AuditCustomRule, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Audit. +func (in *Audit) DeepCopy() *Audit { + if in == nil { + return nil + } + out := new(Audit) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditConfig) DeepCopyInto(out *AuditConfig) { + *out = *in + in.PolicyConfiguration.DeepCopyInto(&out.PolicyConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditConfig. +func (in *AuditConfig) DeepCopy() *AuditConfig { + if in == nil { + return nil + } + out := new(AuditConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditCustomRule) DeepCopyInto(out *AuditCustomRule) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditCustomRule. +func (in *AuditCustomRule) DeepCopy() *AuditCustomRule { + if in == nil { + return nil + } + out := new(AuditCustomRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Authentication) DeepCopyInto(out *Authentication) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Authentication. +func (in *Authentication) DeepCopy() *Authentication { + if in == nil { + return nil + } + out := new(Authentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Authentication) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AuthenticationList) DeepCopyInto(out *AuthenticationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Authentication, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationList. +func (in *AuthenticationList) DeepCopy() *AuthenticationList { + if in == nil { + return nil + } + out := new(AuthenticationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuthenticationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationSpec) DeepCopyInto(out *AuthenticationSpec) { + *out = *in + out.OAuthMetadata = in.OAuthMetadata + if in.WebhookTokenAuthenticators != nil { + in, out := &in.WebhookTokenAuthenticators, &out.WebhookTokenAuthenticators + *out = make([]DeprecatedWebhookTokenAuthenticator, len(*in)) + copy(*out, *in) + } + if in.WebhookTokenAuthenticator != nil { + in, out := &in.WebhookTokenAuthenticator, &out.WebhookTokenAuthenticator + *out = new(WebhookTokenAuthenticator) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationSpec. +func (in *AuthenticationSpec) DeepCopy() *AuthenticationSpec { + if in == nil { + return nil + } + out := new(AuthenticationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthenticationStatus) DeepCopyInto(out *AuthenticationStatus) { + *out = *in + out.IntegratedOAuthMetadata = in.IntegratedOAuthMetadata + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthenticationStatus. +func (in *AuthenticationStatus) DeepCopy() *AuthenticationStatus { + if in == nil { + return nil + } + out := new(AuthenticationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzurePlatformSpec) DeepCopyInto(out *AzurePlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzurePlatformSpec. +func (in *AzurePlatformSpec) DeepCopy() *AzurePlatformSpec { + if in == nil { + return nil + } + out := new(AzurePlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzurePlatformStatus) DeepCopyInto(out *AzurePlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzurePlatformStatus. +func (in *AzurePlatformStatus) DeepCopy() *AzurePlatformStatus { + if in == nil { + return nil + } + out := new(AzurePlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BareMetalPlatformSpec) DeepCopyInto(out *BareMetalPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalPlatformSpec. +func (in *BareMetalPlatformSpec) DeepCopy() *BareMetalPlatformSpec { + if in == nil { + return nil + } + out := new(BareMetalPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BareMetalPlatformStatus) DeepCopyInto(out *BareMetalPlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalPlatformStatus. +func (in *BareMetalPlatformStatus) DeepCopy() *BareMetalPlatformStatus { + if in == nil { + return nil + } + out := new(BareMetalPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicAuthIdentityProvider) DeepCopyInto(out *BasicAuthIdentityProvider) { + *out = *in + out.OAuthRemoteConnectionInfo = in.OAuthRemoteConnectionInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthIdentityProvider. +func (in *BasicAuthIdentityProvider) DeepCopy() *BasicAuthIdentityProvider { + if in == nil { + return nil + } + out := new(BasicAuthIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Build) DeepCopyInto(out *Build) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Build. +func (in *Build) DeepCopy() *Build { + if in == nil { + return nil + } + out := new(Build) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Build) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildDefaults) DeepCopyInto(out *BuildDefaults) { + *out = *in + if in.DefaultProxy != nil { + in, out := &in.DefaultProxy, &out.DefaultProxy + *out = new(ProxySpec) + (*in).DeepCopyInto(*out) + } + if in.GitProxy != nil { + in, out := &in.GitProxy, &out.GitProxy + *out = new(ProxySpec) + (*in).DeepCopyInto(*out) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ImageLabels != nil { + in, out := &in.ImageLabels, &out.ImageLabels + *out = make([]ImageLabel, len(*in)) + copy(*out, *in) + } + in.Resources.DeepCopyInto(&out.Resources) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildDefaults. +func (in *BuildDefaults) DeepCopy() *BuildDefaults { + if in == nil { + return nil + } + out := new(BuildDefaults) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BuildList) DeepCopyInto(out *BuildList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Build, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildList. +func (in *BuildList) DeepCopy() *BuildList { + if in == nil { + return nil + } + out := new(BuildList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildOverrides) DeepCopyInto(out *BuildOverrides) { + *out = *in + if in.ImageLabels != nil { + in, out := &in.ImageLabels, &out.ImageLabels + *out = make([]ImageLabel, len(*in)) + copy(*out, *in) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ForcePull != nil { + in, out := &in.ForcePull, &out.ForcePull + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildOverrides. +func (in *BuildOverrides) DeepCopy() *BuildOverrides { + if in == nil { + return nil + } + out := new(BuildOverrides) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildSpec) DeepCopyInto(out *BuildSpec) { + *out = *in + out.AdditionalTrustedCA = in.AdditionalTrustedCA + in.BuildDefaults.DeepCopyInto(&out.BuildDefaults) + in.BuildOverrides.DeepCopyInto(&out.BuildOverrides) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSpec. +func (in *BuildSpec) DeepCopy() *BuildSpec { + if in == nil { + return nil + } + out := new(BuildSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertInfo) DeepCopyInto(out *CertInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertInfo. +func (in *CertInfo) DeepCopy() *CertInfo { + if in == nil { + return nil + } + out := new(CertInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientConnectionOverrides) DeepCopyInto(out *ClientConnectionOverrides) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionOverrides. +func (in *ClientConnectionOverrides) DeepCopy() *ClientConnectionOverrides { + if in == nil { + return nil + } + out := new(ClientConnectionOverrides) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *ClusterCondition) DeepCopyInto(out *ClusterCondition) { + *out = *in + if in.PromQL != nil { + in, out := &in.PromQL, &out.PromQL + *out = new(PromQLClusterCondition) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCondition. +func (in *ClusterCondition) DeepCopy() *ClusterCondition { + if in == nil { + return nil + } + out := new(ClusterCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry. +func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry { + if in == nil { + return nil + } + out := new(ClusterNetworkEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOperator) DeepCopyInto(out *ClusterOperator) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperator. +func (in *ClusterOperator) DeepCopy() *ClusterOperator { + if in == nil { + return nil + } + out := new(ClusterOperator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterOperator) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOperatorList) DeepCopyInto(out *ClusterOperatorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterOperator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorList. +func (in *ClusterOperatorList) DeepCopy() *ClusterOperatorList { + if in == nil { + return nil + } + out := new(ClusterOperatorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterOperatorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOperatorSpec) DeepCopyInto(out *ClusterOperatorSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorSpec. +func (in *ClusterOperatorSpec) DeepCopy() *ClusterOperatorSpec { + if in == nil { + return nil + } + out := new(ClusterOperatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterOperatorStatus) DeepCopyInto(out *ClusterOperatorStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterOperatorStatusCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]OperandVersion, len(*in)) + copy(*out, *in) + } + if in.RelatedObjects != nil { + in, out := &in.RelatedObjects, &out.RelatedObjects + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + in.Extension.DeepCopyInto(&out.Extension) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorStatus. +func (in *ClusterOperatorStatus) DeepCopy() *ClusterOperatorStatus { + if in == nil { + return nil + } + out := new(ClusterOperatorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterOperatorStatusCondition) DeepCopyInto(out *ClusterOperatorStatusCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterOperatorStatusCondition. +func (in *ClusterOperatorStatusCondition) DeepCopy() *ClusterOperatorStatusCondition { + if in == nil { + return nil + } + out := new(ClusterOperatorStatusCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterVersion) DeepCopyInto(out *ClusterVersion) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersion. +func (in *ClusterVersion) DeepCopy() *ClusterVersion { + if in == nil { + return nil + } + out := new(ClusterVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterVersion) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterVersionList) DeepCopyInto(out *ClusterVersionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterVersion, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionList. +func (in *ClusterVersionList) DeepCopy() *ClusterVersionList { + if in == nil { + return nil + } + out := new(ClusterVersionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterVersionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterVersionSpec) DeepCopyInto(out *ClusterVersionSpec) { + *out = *in + if in.DesiredUpdate != nil { + in, out := &in.DesiredUpdate, &out.DesiredUpdate + *out = new(Update) + **out = **in + } + if in.Overrides != nil { + in, out := &in.Overrides, &out.Overrides + *out = make([]ComponentOverride, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionSpec. +func (in *ClusterVersionSpec) DeepCopy() *ClusterVersionSpec { + if in == nil { + return nil + } + out := new(ClusterVersionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterVersionStatus) DeepCopyInto(out *ClusterVersionStatus) { + *out = *in + in.Desired.DeepCopyInto(&out.Desired) + if in.History != nil { + in, out := &in.History, &out.History + *out = make([]UpdateHistory, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterOperatorStatusCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AvailableUpdates != nil { + in, out := &in.AvailableUpdates, &out.AvailableUpdates + *out = make([]Release, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ConditionalUpdates != nil { + in, out := &in.ConditionalUpdates, &out.ConditionalUpdates + *out = make([]ConditionalUpdate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterVersionStatus. +func (in *ClusterVersionStatus) DeepCopy() *ClusterVersionStatus { + if in == nil { + return nil + } + out := new(ClusterVersionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentOverride) DeepCopyInto(out *ComponentOverride) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentOverride. +func (in *ComponentOverride) DeepCopy() *ComponentOverride { + if in == nil { + return nil + } + out := new(ComponentOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentRouteSpec) DeepCopyInto(out *ComponentRouteSpec) { + *out = *in + out.ServingCertKeyPairSecret = in.ServingCertKeyPairSecret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentRouteSpec. +func (in *ComponentRouteSpec) DeepCopy() *ComponentRouteSpec { + if in == nil { + return nil + } + out := new(ComponentRouteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComponentRouteStatus) DeepCopyInto(out *ComponentRouteStatus) { + *out = *in + if in.ConsumingUsers != nil { + in, out := &in.ConsumingUsers, &out.ConsumingUsers + *out = make([]ConsumingUser, len(*in)) + copy(*out, *in) + } + if in.CurrentHostnames != nil { + in, out := &in.CurrentHostnames, &out.CurrentHostnames + *out = make([]Hostname, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RelatedObjects != nil { + in, out := &in.RelatedObjects, &out.RelatedObjects + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentRouteStatus. +func (in *ComponentRouteStatus) DeepCopy() *ComponentRouteStatus { + if in == nil { + return nil + } + out := new(ComponentRouteStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionalUpdate) DeepCopyInto(out *ConditionalUpdate) { + *out = *in + in.Release.DeepCopyInto(&out.Release) + if in.Risks != nil { + in, out := &in.Risks, &out.Risks + *out = make([]ConditionalUpdateRisk, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionalUpdate. +func (in *ConditionalUpdate) DeepCopy() *ConditionalUpdate { + if in == nil { + return nil + } + out := new(ConditionalUpdate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionalUpdateRisk) DeepCopyInto(out *ConditionalUpdateRisk) { + *out = *in + if in.MatchingRules != nil { + in, out := &in.MatchingRules, &out.MatchingRules + *out = make([]ClusterCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionalUpdateRisk. +func (in *ConditionalUpdateRisk) DeepCopy() *ConditionalUpdateRisk { + if in == nil { + return nil + } + out := new(ConditionalUpdateRisk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapFileReference) DeepCopyInto(out *ConfigMapFileReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapFileReference. +func (in *ConfigMapFileReference) DeepCopy() *ConfigMapFileReference { + if in == nil { + return nil + } + out := new(ConfigMapFileReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapNameReference) DeepCopyInto(out *ConfigMapNameReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapNameReference. 
+func (in *ConfigMapNameReference) DeepCopy() *ConfigMapNameReference { + if in == nil { + return nil + } + out := new(ConfigMapNameReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Console) DeepCopyInto(out *Console) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Console. +func (in *Console) DeepCopy() *Console { + if in == nil { + return nil + } + out := new(Console) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Console) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleAuthentication) DeepCopyInto(out *ConsoleAuthentication) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleAuthentication. +func (in *ConsoleAuthentication) DeepCopy() *ConsoleAuthentication { + if in == nil { + return nil + } + out := new(ConsoleAuthentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleList) DeepCopyInto(out *ConsoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Console, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleList. +func (in *ConsoleList) DeepCopy() *ConsoleList { + if in == nil { + return nil + } + out := new(ConsoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleSpec) DeepCopyInto(out *ConsoleSpec) { + *out = *in + out.Authentication = in.Authentication + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSpec. +func (in *ConsoleSpec) DeepCopy() *ConsoleSpec { + if in == nil { + return nil + } + out := new(ConsoleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleStatus) DeepCopyInto(out *ConsoleStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleStatus. +func (in *ConsoleStatus) DeepCopy() *ConsoleStatus { + if in == nil { + return nil + } + out := new(ConsoleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
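DeepCopyObject is what lets Console and the other root types travel through client-go machinery behind the runtime.Object interface while still supporting safe copies. A hedged usage sketch (assuming the vendored packages above are on the module path, and using Console's LogoutRedirect field purely as an example of a post-copy write):

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// mutateSafely follows the standard client-go rule: an object read from a
// shared informer cache must be deep-copied before any field is written.
func mutateSafely(obj runtime.Object) runtime.Object {
	copied := obj.DeepCopyObject() // the concrete type is preserved
	if c, ok := copied.(*configv1.Console); ok {
		c.Spec.Authentication.LogoutRedirect = "https://example.com/logout"
	}
	return copied
}

func main() {
	var obj runtime.Object = &configv1.Console{}
	out := mutateSafely(obj)
	fmt.Println(out.(*configv1.Console).Spec.Authentication.LogoutRedirect)
	fmt.Println(obj.(*configv1.Console).Spec.Authentication.LogoutRedirect == "") // original untouched
}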
+func (in *CustomFeatureGates) DeepCopyInto(out *CustomFeatureGates) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomFeatureGates. +func (in *CustomFeatureGates) DeepCopy() *CustomFeatureGates { + if in == nil { + return nil + } + out := new(CustomFeatureGates) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomTLSProfile) DeepCopyInto(out *CustomTLSProfile) { + *out = *in + in.TLSProfileSpec.DeepCopyInto(&out.TLSProfileSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomTLSProfile. +func (in *CustomTLSProfile) DeepCopy() *CustomTLSProfile { + if in == nil { + return nil + } + out := new(CustomTLSProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNS) DeepCopyInto(out *DNS) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNS. +func (in *DNS) DeepCopy() *DNS { + if in == nil { + return nil + } + out := new(DNS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNS) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSList) DeepCopyInto(out *DNSList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DNS, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSList. +func (in *DNSList) DeepCopy() *DNSList { + if in == nil { + return nil + } + out := new(DNSList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DNSList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSSpec) DeepCopyInto(out *DNSSpec) { + *out = *in + if in.PublicZone != nil { + in, out := &in.PublicZone, &out.PublicZone + *out = new(DNSZone) + (*in).DeepCopyInto(*out) + } + if in.PrivateZone != nil { + in, out := &in.PrivateZone, &out.PrivateZone + *out = new(DNSZone) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSSpec. 
+func (in *DNSSpec) DeepCopy() *DNSSpec { + if in == nil { + return nil + } + out := new(DNSSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSStatus) DeepCopyInto(out *DNSStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSStatus. +func (in *DNSStatus) DeepCopy() *DNSStatus { + if in == nil { + return nil + } + out := new(DNSStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSZone) DeepCopyInto(out *DNSZone) { + *out = *in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSZone. +func (in *DNSZone) DeepCopy() *DNSZone { + if in == nil { + return nil + } + out := new(DNSZone) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DelegatedAuthentication) DeepCopyInto(out *DelegatedAuthentication) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthentication. +func (in *DelegatedAuthentication) DeepCopy() *DelegatedAuthentication { + if in == nil { + return nil + } + out := new(DelegatedAuthentication) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DelegatedAuthorization) DeepCopyInto(out *DelegatedAuthorization) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DelegatedAuthorization. +func (in *DelegatedAuthorization) DeepCopy() *DelegatedAuthorization { + if in == nil { + return nil + } + out := new(DelegatedAuthorization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeprecatedWebhookTokenAuthenticator) DeepCopyInto(out *DeprecatedWebhookTokenAuthenticator) { + *out = *in + out.KubeConfig = in.KubeConfig + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeprecatedWebhookTokenAuthenticator. +func (in *DeprecatedWebhookTokenAuthenticator) DeepCopy() *DeprecatedWebhookTokenAuthenticator { + if in == nil { + return nil + } + out := new(DeprecatedWebhookTokenAuthenticator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EquinixMetalPlatformSpec) DeepCopyInto(out *EquinixMetalPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EquinixMetalPlatformSpec. +func (in *EquinixMetalPlatformSpec) DeepCopy() *EquinixMetalPlatformSpec { + if in == nil { + return nil + } + out := new(EquinixMetalPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
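Two distinct pointer-copy idioms appear in this file: DesiredUpdate above is copied with **out = **in because Update is a flat struct, whereas DNSSpec's PublicZone and PrivateZone delegate to (*in).DeepCopyInto(*out) because DNSZone owns a map that must be re-allocated key by key. A short sketch of the distinction, with hypothetical Flat and Nested types:

package main

import "fmt"

// Flat has only value fields, so copying the pointee by value is already deep.
type Flat struct{ A string }

// Nested owns a map, so its copy must rebuild the map.
type Nested struct{ Tags map[string]string }

func (in *Nested) DeepCopyInto(out *Nested) {
	*out = *in
	if in.Tags != nil {
		out.Tags = make(map[string]string, len(in.Tags))
		for k, v := range in.Tags {
			out.Tags[k] = v
		}
	}
}

type Spec struct {
	F *Flat
	N *Nested
}

// DeepCopyInto uses the same two idioms as the generated code above.
func (in *Spec) DeepCopyInto(out *Spec) {
	*out = *in
	if in.F != nil {
		in, out := &in.F, &out.F
		*out = new(Flat)
		**out = **in // value copy suffices: Flat has no reference fields
	}
	if in.N != nil {
		in, out := &in.N, &out.N
		*out = new(Nested)
		(*in).DeepCopyInto(*out) // delegate: Nested must rebuild its map
	}
}

func main() {
	in := &Spec{F: &Flat{A: "x"}, N: &Nested{Tags: map[string]string{"k": "v"}}}
	out := new(Spec)
	in.DeepCopyInto(out)
	out.N.Tags["k"] = "changed"
	fmt.Println(in.N.Tags["k"]) // prints "v": the map was re-allocated, not aliased
}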
+func (in *EquinixMetalPlatformStatus) DeepCopyInto(out *EquinixMetalPlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EquinixMetalPlatformStatus. +func (in *EquinixMetalPlatformStatus) DeepCopy() *EquinixMetalPlatformStatus { + if in == nil { + return nil + } + out := new(EquinixMetalPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdConnectionInfo) DeepCopyInto(out *EtcdConnectionInfo) { + *out = *in + if in.URLs != nil { + in, out := &in.URLs, &out.URLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdConnectionInfo. +func (in *EtcdConnectionInfo) DeepCopy() *EtcdConnectionInfo { + if in == nil { + return nil + } + out := new(EtcdConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdStorageConfig) DeepCopyInto(out *EtcdStorageConfig) { + *out = *in + in.EtcdConnectionInfo.DeepCopyInto(&out.EtcdConnectionInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdStorageConfig. +func (in *EtcdStorageConfig) DeepCopy() *EtcdStorageConfig { + if in == nil { + return nil + } + out := new(EtcdStorageConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalIPConfig) DeepCopyInto(out *ExternalIPConfig) { + *out = *in + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(ExternalIPPolicy) + (*in).DeepCopyInto(*out) + } + if in.AutoAssignCIDRs != nil { + in, out := &in.AutoAssignCIDRs, &out.AutoAssignCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPConfig. +func (in *ExternalIPConfig) DeepCopy() *ExternalIPConfig { + if in == nil { + return nil + } + out := new(ExternalIPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalIPPolicy) DeepCopyInto(out *ExternalIPPolicy) { + *out = *in + if in.AllowedCIDRs != nil { + in, out := &in.AllowedCIDRs, &out.AllowedCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.RejectedCIDRs != nil { + in, out := &in.RejectedCIDRs, &out.RejectedCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalIPPolicy. +func (in *ExternalIPPolicy) DeepCopy() *ExternalIPPolicy { + if in == nil { + return nil + } + out := new(ExternalIPPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGate) DeepCopyInto(out *FeatureGate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGate. 
+func (in *FeatureGate) DeepCopy() *FeatureGate { + if in == nil { + return nil + } + out := new(FeatureGate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FeatureGate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateEnabledDisabled) DeepCopyInto(out *FeatureGateEnabledDisabled) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateEnabledDisabled. +func (in *FeatureGateEnabledDisabled) DeepCopy() *FeatureGateEnabledDisabled { + if in == nil { + return nil + } + out := new(FeatureGateEnabledDisabled) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateList) DeepCopyInto(out *FeatureGateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FeatureGate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateList. +func (in *FeatureGateList) DeepCopy() *FeatureGateList { + if in == nil { + return nil + } + out := new(FeatureGateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FeatureGateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateSelection) DeepCopyInto(out *FeatureGateSelection) { + *out = *in + if in.CustomNoUpgrade != nil { + in, out := &in.CustomNoUpgrade, &out.CustomNoUpgrade + *out = new(CustomFeatureGates) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateSelection. +func (in *FeatureGateSelection) DeepCopy() *FeatureGateSelection { + if in == nil { + return nil + } + out := new(FeatureGateSelection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateSpec) DeepCopyInto(out *FeatureGateSpec) { + *out = *in + in.FeatureGateSelection.DeepCopyInto(&out.FeatureGateSelection) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateSpec. +func (in *FeatureGateSpec) DeepCopy() *FeatureGateSpec { + if in == nil { + return nil + } + out := new(FeatureGateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
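The FeatureGate copy chain composes three levels of delegation: FeatureGateSpec hands off to FeatureGateSelection, which re-allocates CustomNoUpgrade, which in turn rebuilds its Enabled and Disabled string slices. A quick mutate-after-copy check, the usual way to sanity-test any of these implementations (assuming the vendored package is importable; in this revision Enabled is a []string):

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	in := &configv1.CustomFeatureGates{Enabled: []string{"FeatureA"}}
	out := in.DeepCopy()
	out.Enabled[0] = "FeatureB" // must not leak back into in
	fmt.Println(in.Enabled[0])  // prints "FeatureA": the slice was re-allocated
}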
+func (in *FeatureGateStatus) DeepCopyInto(out *FeatureGateStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateStatus. +func (in *FeatureGateStatus) DeepCopy() *FeatureGateStatus { + if in == nil { + return nil + } + out := new(FeatureGateStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPPlatformSpec) DeepCopyInto(out *GCPPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPPlatformSpec. +func (in *GCPPlatformSpec) DeepCopy() *GCPPlatformSpec { + if in == nil { + return nil + } + out := new(GCPPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPPlatformStatus) DeepCopyInto(out *GCPPlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPPlatformStatus. +func (in *GCPPlatformStatus) DeepCopy() *GCPPlatformStatus { + if in == nil { + return nil + } + out := new(GCPPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericAPIServerConfig) DeepCopyInto(out *GenericAPIServerConfig) { + *out = *in + in.ServingInfo.DeepCopyInto(&out.ServingInfo) + if in.CORSAllowedOrigins != nil { + in, out := &in.CORSAllowedOrigins, &out.CORSAllowedOrigins + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.AuditConfig.DeepCopyInto(&out.AuditConfig) + in.StorageConfig.DeepCopyInto(&out.StorageConfig) + in.AdmissionConfig.DeepCopyInto(&out.AdmissionConfig) + out.KubeClientConfig = in.KubeClientConfig + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericAPIServerConfig. +func (in *GenericAPIServerConfig) DeepCopy() *GenericAPIServerConfig { + if in == nil { + return nil + } + out := new(GenericAPIServerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericControllerConfig) DeepCopyInto(out *GenericControllerConfig) { + *out = *in + in.ServingInfo.DeepCopyInto(&out.ServingInfo) + out.LeaderElection = in.LeaderElection + out.Authentication = in.Authentication + out.Authorization = in.Authorization + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericControllerConfig. +func (in *GenericControllerConfig) DeepCopy() *GenericControllerConfig { + if in == nil { + return nil + } + out := new(GenericControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GitHubIdentityProvider) DeepCopyInto(out *GitHubIdentityProvider) { + *out = *in + out.ClientSecret = in.ClientSecret + if in.Organizations != nil { + in, out := &in.Organizations, &out.Organizations + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Teams != nil { + in, out := &in.Teams, &out.Teams + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.CA = in.CA + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubIdentityProvider. +func (in *GitHubIdentityProvider) DeepCopy() *GitHubIdentityProvider { + if in == nil { + return nil + } + out := new(GitHubIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitLabIdentityProvider) DeepCopyInto(out *GitLabIdentityProvider) { + *out = *in + out.ClientSecret = in.ClientSecret + out.CA = in.CA + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLabIdentityProvider. +func (in *GitLabIdentityProvider) DeepCopy() *GitLabIdentityProvider { + if in == nil { + return nil + } + out := new(GitLabIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GoogleIdentityProvider) DeepCopyInto(out *GoogleIdentityProvider) { + *out = *in + out.ClientSecret = in.ClientSecret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleIdentityProvider. +func (in *GoogleIdentityProvider) DeepCopy() *GoogleIdentityProvider { + if in == nil { + return nil + } + out := new(GoogleIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTPasswdIdentityProvider) DeepCopyInto(out *HTPasswdIdentityProvider) { + *out = *in + out.FileData = in.FileData + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTPasswdIdentityProvider. +func (in *HTPasswdIdentityProvider) DeepCopy() *HTPasswdIdentityProvider { + if in == nil { + return nil + } + out := new(HTPasswdIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPServingInfo) DeepCopyInto(out *HTTPServingInfo) { + *out = *in + in.ServingInfo.DeepCopyInto(&out.ServingInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPServingInfo. +func (in *HTTPServingInfo) DeepCopy() *HTTPServingInfo { + if in == nil { + return nil + } + out := new(HTTPServingInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HubSource) DeepCopyInto(out *HubSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubSource. +func (in *HubSource) DeepCopy() *HubSource { + if in == nil { + return nil + } + out := new(HubSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HubSourceStatus) DeepCopyInto(out *HubSourceStatus) { + *out = *in + out.HubSource = in.HubSource + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HubSourceStatus. +func (in *HubSourceStatus) DeepCopy() *HubSourceStatus { + if in == nil { + return nil + } + out := new(HubSourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBMCloudPlatformSpec) DeepCopyInto(out *IBMCloudPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudPlatformSpec. +func (in *IBMCloudPlatformSpec) DeepCopy() *IBMCloudPlatformSpec { + if in == nil { + return nil + } + out := new(IBMCloudPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBMCloudPlatformStatus) DeepCopyInto(out *IBMCloudPlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMCloudPlatformStatus. +func (in *IBMCloudPlatformStatus) DeepCopy() *IBMCloudPlatformStatus { + if in == nil { + return nil + } + out := new(IBMCloudPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityProvider) DeepCopyInto(out *IdentityProvider) { + *out = *in + in.IdentityProviderConfig.DeepCopyInto(&out.IdentityProviderConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProvider. +func (in *IdentityProvider) DeepCopy() *IdentityProvider { + if in == nil { + return nil + } + out := new(IdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityProviderConfig) DeepCopyInto(out *IdentityProviderConfig) { + *out = *in + if in.BasicAuth != nil { + in, out := &in.BasicAuth, &out.BasicAuth + *out = new(BasicAuthIdentityProvider) + **out = **in + } + if in.GitHub != nil { + in, out := &in.GitHub, &out.GitHub + *out = new(GitHubIdentityProvider) + (*in).DeepCopyInto(*out) + } + if in.GitLab != nil { + in, out := &in.GitLab, &out.GitLab + *out = new(GitLabIdentityProvider) + **out = **in + } + if in.Google != nil { + in, out := &in.Google, &out.Google + *out = new(GoogleIdentityProvider) + **out = **in + } + if in.HTPasswd != nil { + in, out := &in.HTPasswd, &out.HTPasswd + *out = new(HTPasswdIdentityProvider) + **out = **in + } + if in.Keystone != nil { + in, out := &in.Keystone, &out.Keystone + *out = new(KeystoneIdentityProvider) + **out = **in + } + if in.LDAP != nil { + in, out := &in.LDAP, &out.LDAP + *out = new(LDAPIdentityProvider) + (*in).DeepCopyInto(*out) + } + if in.OpenID != nil { + in, out := &in.OpenID, &out.OpenID + *out = new(OpenIDIdentityProvider) + (*in).DeepCopyInto(*out) + } + if in.RequestHeader != nil { + in, out := &in.RequestHeader, &out.RequestHeader + *out = new(RequestHeaderIdentityProvider) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProviderConfig. 
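IdentityProviderConfig is effectively a tagged union: at most one of its nine provider pointers is expected to be set, and the generated copy re-allocates whichever member is non-nil, by plain value copy for flat providers (BasicAuth, GitLab, Google, HTPasswd, Keystone) and via DeepCopyInto for those carrying nested slices or maps (GitHub, LDAP, OpenID, RequestHeader). A hedged sketch of the resulting ownership (assuming the vendored package is importable):

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	in := configv1.IdentityProviderConfig{
		Type:     configv1.IdentityProviderTypeHTPasswd,
		HTPasswd: &configv1.HTPasswdIdentityProvider{},
	}
	out := in.DeepCopy()
	// Only the member that is set gets re-allocated; the copy owns its own pointer.
	fmt.Println(out.HTPasswd != in.HTPasswd) // prints "true"
}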
+func (in *IdentityProviderConfig) DeepCopy() *IdentityProviderConfig { + if in == nil { + return nil + } + out := new(IdentityProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Image) DeepCopyInto(out *Image) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. +func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Image) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageContentPolicy) DeepCopyInto(out *ImageContentPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentPolicy. +func (in *ImageContentPolicy) DeepCopy() *ImageContentPolicy { + if in == nil { + return nil + } + out := new(ImageContentPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageContentPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageContentPolicyList) DeepCopyInto(out *ImageContentPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageContentPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentPolicyList. +func (in *ImageContentPolicyList) DeepCopy() *ImageContentPolicyList { + if in == nil { + return nil + } + out := new(ImageContentPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageContentPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageContentPolicySpec) DeepCopyInto(out *ImageContentPolicySpec) { + *out = *in + if in.RepositoryDigestMirrors != nil { + in, out := &in.RepositoryDigestMirrors, &out.RepositoryDigestMirrors + *out = make([]RepositoryDigestMirrors, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentPolicySpec. 
+func (in *ImageContentPolicySpec) DeepCopy() *ImageContentPolicySpec { + if in == nil { + return nil + } + out := new(ImageContentPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageLabel) DeepCopyInto(out *ImageLabel) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLabel. +func (in *ImageLabel) DeepCopy() *ImageLabel { + if in == nil { + return nil + } + out := new(ImageLabel) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageList) DeepCopyInto(out *ImageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Image, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList. +func (in *ImageList) DeepCopy() *ImageList { + if in == nil { + return nil + } + out := new(ImageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageSpec) DeepCopyInto(out *ImageSpec) { + *out = *in + if in.AllowedRegistriesForImport != nil { + in, out := &in.AllowedRegistriesForImport, &out.AllowedRegistriesForImport + *out = make([]RegistryLocation, len(*in)) + copy(*out, *in) + } + if in.ExternalRegistryHostnames != nil { + in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.AdditionalTrustedCA = in.AdditionalTrustedCA + in.RegistrySources.DeepCopyInto(&out.RegistrySources) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSpec. +func (in *ImageSpec) DeepCopy() *ImageSpec { + if in == nil { + return nil + } + out := new(ImageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStatus) DeepCopyInto(out *ImageStatus) { + *out = *in + if in.ExternalRegistryHostnames != nil { + in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStatus. +func (in *ImageStatus) DeepCopy() *ImageStatus { + if in == nil { + return nil + } + out := new(ImageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Infrastructure) DeepCopyInto(out *Infrastructure) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Infrastructure. 
+func (in *Infrastructure) DeepCopy() *Infrastructure { + if in == nil { + return nil + } + out := new(Infrastructure) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Infrastructure) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureList) DeepCopyInto(out *InfrastructureList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Infrastructure, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureList. +func (in *InfrastructureList) DeepCopy() *InfrastructureList { + if in == nil { + return nil + } + out := new(InfrastructureList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InfrastructureList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureSpec) DeepCopyInto(out *InfrastructureSpec) { + *out = *in + out.CloudConfig = in.CloudConfig + in.PlatformSpec.DeepCopyInto(&out.PlatformSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureSpec. +func (in *InfrastructureSpec) DeepCopy() *InfrastructureSpec { + if in == nil { + return nil + } + out := new(InfrastructureSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InfrastructureStatus) DeepCopyInto(out *InfrastructureStatus) { + *out = *in + if in.PlatformStatus != nil { + in, out := &in.PlatformStatus, &out.PlatformStatus + *out = new(PlatformStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureStatus. +func (in *InfrastructureStatus) DeepCopy() *InfrastructureStatus { + if in == nil { + return nil + } + out := new(InfrastructureStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Ingress) DeepCopyInto(out *Ingress) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress. +func (in *Ingress) DeepCopy() *Ingress { + if in == nil { + return nil + } + out := new(Ingress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Ingress) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressList) DeepCopyInto(out *IngressList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Ingress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList. +func (in *IngressList) DeepCopy() *IngressList { + if in == nil { + return nil + } + out := new(IngressList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressSpec) DeepCopyInto(out *IngressSpec) { + *out = *in + if in.ComponentRoutes != nil { + in, out := &in.ComponentRoutes, &out.ComponentRoutes + *out = make([]ComponentRouteSpec, len(*in)) + copy(*out, *in) + } + if in.RequiredHSTSPolicies != nil { + in, out := &in.RequiredHSTSPolicies, &out.RequiredHSTSPolicies + *out = make([]RequiredHSTSPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec. +func (in *IngressSpec) DeepCopy() *IngressSpec { + if in == nil { + return nil + } + out := new(IngressSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressStatus) DeepCopyInto(out *IngressStatus) { + *out = *in + if in.ComponentRoutes != nil { + in, out := &in.ComponentRoutes, &out.ComponentRoutes + *out = make([]ComponentRouteStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus. +func (in *IngressStatus) DeepCopy() *IngressStatus { + if in == nil { + return nil + } + out := new(IngressStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntermediateTLSProfile) DeepCopyInto(out *IntermediateTLSProfile) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntermediateTLSProfile. +func (in *IntermediateTLSProfile) DeepCopy() *IntermediateTLSProfile { + if in == nil { + return nil + } + out := new(IntermediateTLSProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeystoneIdentityProvider) DeepCopyInto(out *KeystoneIdentityProvider) { + *out = *in + out.OAuthRemoteConnectionInfo = in.OAuthRemoteConnectionInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeystoneIdentityProvider. +func (in *KeystoneIdentityProvider) DeepCopy() *KeystoneIdentityProvider { + if in == nil { + return nil + } + out := new(KeystoneIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
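IngressSpec shows the generator's two slice strategies side by side: ComponentRoutes is duplicated with a single copy() call because ComponentRouteSpec holds only value fields, while RequiredHSTSPolicies needs an element-wise DeepCopyInto loop because each policy carries its own nested reference fields. A small check that the copy()-based path still yields independent elements (assuming the vendored package is importable; the Name values are arbitrary examples):

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	in := configv1.IngressSpec{
		ComponentRoutes: []configv1.ComponentRouteSpec{{Name: "console"}},
	}
	out := in.DeepCopy()
	out.ComponentRoutes[0].Name = "oauth"
	fmt.Println(in.ComponentRoutes[0].Name) // prints "console": value elements copy fully
}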
+func (in *KubeClientConfig) DeepCopyInto(out *KubeClientConfig) { + *out = *in + out.ConnectionOverrides = in.ConnectionOverrides + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeClientConfig. +func (in *KubeClientConfig) DeepCopy() *KubeClientConfig { + if in == nil { + return nil + } + out := new(KubeClientConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubevirtPlatformSpec) DeepCopyInto(out *KubevirtPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtPlatformSpec. +func (in *KubevirtPlatformSpec) DeepCopy() *KubevirtPlatformSpec { + if in == nil { + return nil + } + out := new(KubevirtPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubevirtPlatformStatus) DeepCopyInto(out *KubevirtPlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtPlatformStatus. +func (in *KubevirtPlatformStatus) DeepCopy() *KubevirtPlatformStatus { + if in == nil { + return nil + } + out := new(KubevirtPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LDAPAttributeMapping) DeepCopyInto(out *LDAPAttributeMapping) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsername != nil { + in, out := &in.PreferredUsername, &out.PreferredUsername + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPAttributeMapping. +func (in *LDAPAttributeMapping) DeepCopy() *LDAPAttributeMapping { + if in == nil { + return nil + } + out := new(LDAPAttributeMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LDAPIdentityProvider) DeepCopyInto(out *LDAPIdentityProvider) { + *out = *in + out.BindPassword = in.BindPassword + out.CA = in.CA + in.Attributes.DeepCopyInto(&out.Attributes) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPIdentityProvider. +func (in *LDAPIdentityProvider) DeepCopy() *LDAPIdentityProvider { + if in == nil { + return nil + } + out := new(LDAPIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaderElection) DeepCopyInto(out *LeaderElection) { + *out = *in + out.LeaseDuration = in.LeaseDuration + out.RenewDeadline = in.RenewDeadline + out.RetryPeriod = in.RetryPeriod + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderElection. 
+func (in *LeaderElection) DeepCopy() *LeaderElection { + if in == nil { + return nil + } + out := new(LeaderElection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MTUMigration) DeepCopyInto(out *MTUMigration) { + *out = *in + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(MTUMigrationValues) + (*in).DeepCopyInto(*out) + } + if in.Machine != nil { + in, out := &in.Machine, &out.Machine + *out = new(MTUMigrationValues) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MTUMigration. +func (in *MTUMigration) DeepCopy() *MTUMigration { + if in == nil { + return nil + } + out := new(MTUMigration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MTUMigrationValues) DeepCopyInto(out *MTUMigrationValues) { + *out = *in + if in.To != nil { + in, out := &in.To, &out.To + *out = new(uint32) + **out = **in + } + if in.From != nil { + in, out := &in.From, &out.From + *out = new(uint32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MTUMigrationValues. +func (in *MTUMigrationValues) DeepCopy() *MTUMigrationValues { + if in == nil { + return nil + } + out := new(MTUMigrationValues) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaxAgePolicy) DeepCopyInto(out *MaxAgePolicy) { + *out = *in + if in.LargestMaxAge != nil { + in, out := &in.LargestMaxAge, &out.LargestMaxAge + *out = new(int32) + **out = **in + } + if in.SmallestMaxAge != nil { + in, out := &in.SmallestMaxAge, &out.SmallestMaxAge + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaxAgePolicy. +func (in *MaxAgePolicy) DeepCopy() *MaxAgePolicy { + if in == nil { + return nil + } + out := new(MaxAgePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModernTLSProfile) DeepCopyInto(out *ModernTLSProfile) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModernTLSProfile. +func (in *ModernTLSProfile) DeepCopy() *ModernTLSProfile { + if in == nil { + return nil + } + out := new(ModernTLSProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedCertificate) DeepCopyInto(out *NamedCertificate) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedCertificate. +func (in *NamedCertificate) DeepCopy() *NamedCertificate { + if in == nil { + return nil + } + out := new(NamedCertificate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
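For optional scalars such as MTUMigrationValues' To and From (*uint32) and MaxAgePolicy's bounds (*int32), the generator emits new(T) followed by **out = **in, which is already a complete deep copy since the pointee has no internal structure. The practical effect (assuming the vendored package is importable):

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	mtu := uint32(1400)
	in := &configv1.MTUMigrationValues{To: &mtu}
	out := in.DeepCopy()
	*out.To = 9000
	fmt.Println(*in.To, *out.To) // prints "1400 9000": the pointer is not shared
}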
+func (in *Network) DeepCopyInto(out *Network) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network. +func (in *Network) DeepCopy() *Network { + if in == nil { + return nil + } + out := new(Network) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Network) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkList) DeepCopyInto(out *NetworkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Network, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkList. +func (in *NetworkList) DeepCopy() *NetworkList { + if in == nil { + return nil + } + out := new(NetworkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkMigration) DeepCopyInto(out *NetworkMigration) { + *out = *in + if in.MTU != nil { + in, out := &in.MTU, &out.MTU + *out = new(MTUMigration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkMigration. +func (in *NetworkMigration) DeepCopy() *NetworkMigration { + if in == nil { + return nil + } + out := new(NetworkMigration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { + *out = *in + if in.ClusterNetwork != nil { + in, out := &in.ClusterNetwork, &out.ClusterNetwork + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.ServiceNetwork != nil { + in, out := &in.ServiceNetwork, &out.ServiceNetwork + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExternalIP != nil { + in, out := &in.ExternalIP, &out.ExternalIP + *out = new(ExternalIPConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. +func (in *NetworkSpec) DeepCopy() *NetworkSpec { + if in == nil { + return nil + } + out := new(NetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) { + *out = *in + if in.ClusterNetwork != nil { + in, out := &in.ClusterNetwork, &out.ClusterNetwork + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.ServiceNetwork != nil { + in, out := &in.ServiceNetwork, &out.ServiceNetwork + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Migration != nil { + in, out := &in.Migration, &out.Migration + *out = new(NetworkMigration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus. +func (in *NetworkStatus) DeepCopy() *NetworkStatus { + if in == nil { + return nil + } + out := new(NetworkStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuth) DeepCopyInto(out *OAuth) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuth. +func (in *OAuth) DeepCopy() *OAuth { + if in == nil { + return nil + } + out := new(OAuth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuth) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthList) DeepCopyInto(out *OAuthList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OAuth, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthList. +func (in *OAuthList) DeepCopy() *OAuthList { + if in == nil { + return nil + } + out := new(OAuthList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthRemoteConnectionInfo) DeepCopyInto(out *OAuthRemoteConnectionInfo) { + *out = *in + out.CA = in.CA + out.TLSClientCert = in.TLSClientCert + out.TLSClientKey = in.TLSClientKey + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthRemoteConnectionInfo. +func (in *OAuthRemoteConnectionInfo) DeepCopy() *OAuthRemoteConnectionInfo { + if in == nil { + return nil + } + out := new(OAuthRemoteConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OAuthSpec) DeepCopyInto(out *OAuthSpec) { + *out = *in + if in.IdentityProviders != nil { + in, out := &in.IdentityProviders, &out.IdentityProviders + *out = make([]IdentityProvider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.TokenConfig.DeepCopyInto(&out.TokenConfig) + out.Templates = in.Templates + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthSpec. +func (in *OAuthSpec) DeepCopy() *OAuthSpec { + if in == nil { + return nil + } + out := new(OAuthSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthStatus) DeepCopyInto(out *OAuthStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthStatus. +func (in *OAuthStatus) DeepCopy() *OAuthStatus { + if in == nil { + return nil + } + out := new(OAuthStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthTemplates) DeepCopyInto(out *OAuthTemplates) { + *out = *in + out.Login = in.Login + out.ProviderSelection = in.ProviderSelection + out.Error = in.Error + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthTemplates. +func (in *OAuthTemplates) DeepCopy() *OAuthTemplates { + if in == nil { + return nil + } + out := new(OAuthTemplates) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. +func (in *ObjectReference) DeepCopy() *ObjectReference { + if in == nil { + return nil + } + out := new(ObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OldTLSProfile) DeepCopyInto(out *OldTLSProfile) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OldTLSProfile. +func (in *OldTLSProfile) DeepCopy() *OldTLSProfile { + if in == nil { + return nil + } + out := new(OldTLSProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDClaims) DeepCopyInto(out *OpenIDClaims) { + *out = *in + if in.PreferredUsername != nil { + in, out := &in.PreferredUsername, &out.PreferredUsername + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]OpenIDClaim, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDClaims. 
+func (in *OpenIDClaims) DeepCopy() *OpenIDClaims { + if in == nil { + return nil + } + out := new(OpenIDClaims) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDIdentityProvider) DeepCopyInto(out *OpenIDIdentityProvider) { + *out = *in + out.ClientSecret = in.ClientSecret + out.CA = in.CA + if in.ExtraScopes != nil { + in, out := &in.ExtraScopes, &out.ExtraScopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExtraAuthorizeParameters != nil { + in, out := &in.ExtraAuthorizeParameters, &out.ExtraAuthorizeParameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Claims.DeepCopyInto(&out.Claims) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDIdentityProvider. +func (in *OpenIDIdentityProvider) DeepCopy() *OpenIDIdentityProvider { + if in == nil { + return nil + } + out := new(OpenIDIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackPlatformSpec) DeepCopyInto(out *OpenStackPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackPlatformSpec. +func (in *OpenStackPlatformSpec) DeepCopy() *OpenStackPlatformSpec { + if in == nil { + return nil + } + out := new(OpenStackPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackPlatformStatus) DeepCopyInto(out *OpenStackPlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackPlatformStatus. +func (in *OpenStackPlatformStatus) DeepCopy() *OpenStackPlatformStatus { + if in == nil { + return nil + } + out := new(OpenStackPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperandVersion) DeepCopyInto(out *OperandVersion) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperandVersion. +func (in *OperandVersion) DeepCopy() *OperandVersion { + if in == nil { + return nil + } + out := new(OperandVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorHub) DeepCopyInto(out *OperatorHub) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHub. +func (in *OperatorHub) DeepCopy() *OperatorHub { + if in == nil { + return nil + } + out := new(OperatorHub) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *OperatorHub) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorHubList) DeepCopyInto(out *OperatorHubList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OperatorHub, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubList. +func (in *OperatorHubList) DeepCopy() *OperatorHubList { + if in == nil { + return nil + } + out := new(OperatorHubList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OperatorHubList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorHubSpec) DeepCopyInto(out *OperatorHubSpec) { + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]HubSource, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubSpec. +func (in *OperatorHubSpec) DeepCopy() *OperatorHubSpec { + if in == nil { + return nil + } + out := new(OperatorHubSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorHubStatus) DeepCopyInto(out *OperatorHubStatus) { + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]HubSourceStatus, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorHubStatus. +func (in *OperatorHubStatus) DeepCopy() *OperatorHubStatus { + if in == nil { + return nil + } + out := new(OperatorHubStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OvirtPlatformSpec) DeepCopyInto(out *OvirtPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtPlatformSpec. +func (in *OvirtPlatformSpec) DeepCopy() *OvirtPlatformSpec { + if in == nil { + return nil + } + out := new(OvirtPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OvirtPlatformStatus) DeepCopyInto(out *OvirtPlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtPlatformStatus. +func (in *OvirtPlatformStatus) DeepCopy() *OvirtPlatformStatus { + if in == nil { + return nil + } + out := new(OvirtPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlatformSpec) DeepCopyInto(out *PlatformSpec) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSPlatformSpec) + (*in).DeepCopyInto(*out) + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(AzurePlatformSpec) + **out = **in + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(GCPPlatformSpec) + **out = **in + } + if in.BareMetal != nil { + in, out := &in.BareMetal, &out.BareMetal + *out = new(BareMetalPlatformSpec) + **out = **in + } + if in.OpenStack != nil { + in, out := &in.OpenStack, &out.OpenStack + *out = new(OpenStackPlatformSpec) + **out = **in + } + if in.Ovirt != nil { + in, out := &in.Ovirt, &out.Ovirt + *out = new(OvirtPlatformSpec) + **out = **in + } + if in.VSphere != nil { + in, out := &in.VSphere, &out.VSphere + *out = new(VSpherePlatformSpec) + **out = **in + } + if in.IBMCloud != nil { + in, out := &in.IBMCloud, &out.IBMCloud + *out = new(IBMCloudPlatformSpec) + **out = **in + } + if in.Kubevirt != nil { + in, out := &in.Kubevirt, &out.Kubevirt + *out = new(KubevirtPlatformSpec) + **out = **in + } + if in.EquinixMetal != nil { + in, out := &in.EquinixMetal, &out.EquinixMetal + *out = new(EquinixMetalPlatformSpec) + **out = **in + } + if in.PowerVS != nil { + in, out := &in.PowerVS, &out.PowerVS + *out = new(PowerVSPlatformSpec) + (*in).DeepCopyInto(*out) + } + if in.AlibabaCloud != nil { + in, out := &in.AlibabaCloud, &out.AlibabaCloud + *out = new(AlibabaCloudPlatformSpec) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformSpec. +func (in *PlatformSpec) DeepCopy() *PlatformSpec { + if in == nil { + return nil + } + out := new(PlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(AzurePlatformStatus) + **out = **in + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(GCPPlatformStatus) + **out = **in + } + if in.BareMetal != nil { + in, out := &in.BareMetal, &out.BareMetal + *out = new(BareMetalPlatformStatus) + **out = **in + } + if in.OpenStack != nil { + in, out := &in.OpenStack, &out.OpenStack + *out = new(OpenStackPlatformStatus) + **out = **in + } + if in.Ovirt != nil { + in, out := &in.Ovirt, &out.Ovirt + *out = new(OvirtPlatformStatus) + **out = **in + } + if in.VSphere != nil { + in, out := &in.VSphere, &out.VSphere + *out = new(VSpherePlatformStatus) + **out = **in + } + if in.IBMCloud != nil { + in, out := &in.IBMCloud, &out.IBMCloud + *out = new(IBMCloudPlatformStatus) + **out = **in + } + if in.Kubevirt != nil { + in, out := &in.Kubevirt, &out.Kubevirt + *out = new(KubevirtPlatformStatus) + **out = **in + } + if in.EquinixMetal != nil { + in, out := &in.EquinixMetal, &out.EquinixMetal + *out = new(EquinixMetalPlatformStatus) + **out = **in + } + if in.PowerVS != nil { + in, out := &in.PowerVS, &out.PowerVS + *out = new(PowerVSPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.AlibabaCloud != nil { + in, out := &in.AlibabaCloud, &out.AlibabaCloud + *out = new(AlibabaCloudPlatformStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformStatus. +func (in *PlatformStatus) DeepCopy() *PlatformStatus { + if in == nil { + return nil + } + out := new(PlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PowerVSPlatformSpec) DeepCopyInto(out *PowerVSPlatformSpec) { + *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]PowerVSServiceEndpoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSPlatformSpec. +func (in *PowerVSPlatformSpec) DeepCopy() *PowerVSPlatformSpec { + if in == nil { + return nil + } + out := new(PowerVSPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PowerVSPlatformStatus) DeepCopyInto(out *PowerVSPlatformStatus) { + *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]PowerVSServiceEndpoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSPlatformStatus. +func (in *PowerVSPlatformStatus) DeepCopy() *PowerVSPlatformStatus { + if in == nil { + return nil + } + out := new(PowerVSPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PowerVSServiceEndpoint) DeepCopyInto(out *PowerVSServiceEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSServiceEndpoint. 
+func (in *PowerVSServiceEndpoint) DeepCopy() *PowerVSServiceEndpoint { + if in == nil { + return nil + } + out := new(PowerVSServiceEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Project) DeepCopyInto(out *Project) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project. +func (in *Project) DeepCopy() *Project { + if in == nil { + return nil + } + out := new(Project) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Project) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectList) DeepCopyInto(out *ProjectList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Project, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList. +func (in *ProjectList) DeepCopy() *ProjectList { + if in == nil { + return nil + } + out := new(ProjectList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) { + *out = *in + out.ProjectRequestTemplate = in.ProjectRequestTemplate + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec. +func (in *ProjectSpec) DeepCopy() *ProjectSpec { + if in == nil { + return nil + } + out := new(ProjectSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus. +func (in *ProjectStatus) DeepCopy() *ProjectStatus { + if in == nil { + return nil + } + out := new(ProjectStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PromQLClusterCondition) DeepCopyInto(out *PromQLClusterCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PromQLClusterCondition. +func (in *PromQLClusterCondition) DeepCopy() *PromQLClusterCondition { + if in == nil { + return nil + } + out := new(PromQLClusterCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Proxy) DeepCopyInto(out *Proxy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Proxy. +func (in *Proxy) DeepCopy() *Proxy { + if in == nil { + return nil + } + out := new(Proxy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Proxy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyList) DeepCopyInto(out *ProxyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Proxy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyList. +func (in *ProxyList) DeepCopy() *ProxyList { + if in == nil { + return nil + } + out := new(ProxyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProxyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxySpec) DeepCopyInto(out *ProxySpec) { + *out = *in + if in.ReadinessEndpoints != nil { + in, out := &in.ReadinessEndpoints, &out.ReadinessEndpoints + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.TrustedCA = in.TrustedCA + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxySpec. +func (in *ProxySpec) DeepCopy() *ProxySpec { + if in == nil { + return nil + } + out := new(ProxySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyStatus) DeepCopyInto(out *ProxyStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyStatus. +func (in *ProxyStatus) DeepCopy() *ProxyStatus { + if in == nil { + return nil + } + out := new(ProxyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryLocation) DeepCopyInto(out *RegistryLocation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryLocation. +func (in *RegistryLocation) DeepCopy() *RegistryLocation { + if in == nil { + return nil + } + out := new(RegistryLocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RegistrySources) DeepCopyInto(out *RegistrySources) { + *out = *in + if in.InsecureRegistries != nil { + in, out := &in.InsecureRegistries, &out.InsecureRegistries + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.BlockedRegistries != nil { + in, out := &in.BlockedRegistries, &out.BlockedRegistries + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AllowedRegistries != nil { + in, out := &in.AllowedRegistries, &out.AllowedRegistries + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ContainerRuntimeSearchRegistries != nil { + in, out := &in.ContainerRuntimeSearchRegistries, &out.ContainerRuntimeSearchRegistries + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrySources. +func (in *RegistrySources) DeepCopy() *RegistrySources { + if in == nil { + return nil + } + out := new(RegistrySources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Release) DeepCopyInto(out *Release) { + *out = *in + if in.Channels != nil { + in, out := &in.Channels, &out.Channels + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Release. +func (in *Release) DeepCopy() *Release { + if in == nil { + return nil + } + out := new(Release) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemoteConnectionInfo) DeepCopyInto(out *RemoteConnectionInfo) { + *out = *in + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteConnectionInfo. +func (in *RemoteConnectionInfo) DeepCopy() *RemoteConnectionInfo { + if in == nil { + return nil + } + out := new(RemoteConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryDigestMirrors) DeepCopyInto(out *RepositoryDigestMirrors) { + *out = *in + if in.Mirrors != nil { + in, out := &in.Mirrors, &out.Mirrors + *out = make([]Mirror, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryDigestMirrors. +func (in *RepositoryDigestMirrors) DeepCopy() *RepositoryDigestMirrors { + if in == nil { + return nil + } + out := new(RepositoryDigestMirrors) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestHeaderIdentityProvider) DeepCopyInto(out *RequestHeaderIdentityProvider) { + *out = *in + out.ClientCA = in.ClientCA + if in.ClientCommonNames != nil { + in, out := &in.ClientCommonNames, &out.ClientCommonNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsernameHeaders != nil { + in, out := &in.PreferredUsernameHeaders, &out.PreferredUsernameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NameHeaders != nil { + in, out := &in.NameHeaders, &out.NameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.EmailHeaders != nil { + in, out := &in.EmailHeaders, &out.EmailHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderIdentityProvider. +func (in *RequestHeaderIdentityProvider) DeepCopy() *RequestHeaderIdentityProvider { + if in == nil { + return nil + } + out := new(RequestHeaderIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequiredHSTSPolicy) DeepCopyInto(out *RequiredHSTSPolicy) { + *out = *in + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.DomainPatterns != nil { + in, out := &in.DomainPatterns, &out.DomainPatterns + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.MaxAge.DeepCopyInto(&out.MaxAge) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequiredHSTSPolicy. +func (in *RequiredHSTSPolicy) DeepCopy() *RequiredHSTSPolicy { + if in == nil { + return nil + } + out := new(RequiredHSTSPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scheduler) DeepCopyInto(out *Scheduler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduler. +func (in *Scheduler) DeepCopy() *Scheduler { + if in == nil { + return nil + } + out := new(Scheduler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Scheduler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulerList) DeepCopyInto(out *SchedulerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Scheduler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerList. 
+func (in *SchedulerList) DeepCopy() *SchedulerList { + if in == nil { + return nil + } + out := new(SchedulerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SchedulerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulerSpec) DeepCopyInto(out *SchedulerSpec) { + *out = *in + out.Policy = in.Policy + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerSpec. +func (in *SchedulerSpec) DeepCopy() *SchedulerSpec { + if in == nil { + return nil + } + out := new(SchedulerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchedulerStatus) DeepCopyInto(out *SchedulerStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerStatus. +func (in *SchedulerStatus) DeepCopy() *SchedulerStatus { + if in == nil { + return nil + } + out := new(SchedulerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretNameReference) DeepCopyInto(out *SecretNameReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretNameReference. +func (in *SecretNameReference) DeepCopy() *SecretNameReference { + if in == nil { + return nil + } + out := new(SecretNameReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServingInfo) DeepCopyInto(out *ServingInfo) { + *out = *in + out.CertInfo = in.CertInfo + if in.NamedCertificates != nil { + in, out := &in.NamedCertificates, &out.NamedCertificates + *out = make([]NamedCertificate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CipherSuites != nil { + in, out := &in.CipherSuites, &out.CipherSuites + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServingInfo. +func (in *ServingInfo) DeepCopy() *ServingInfo { + if in == nil { + return nil + } + out := new(ServingInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringSource) DeepCopyInto(out *StringSource) { + *out = *in + out.StringSourceSpec = in.StringSourceSpec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSource. +func (in *StringSource) DeepCopy() *StringSource { + if in == nil { + return nil + } + out := new(StringSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringSourceSpec) DeepCopyInto(out *StringSourceSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSourceSpec. 
+func (in *StringSourceSpec) DeepCopy() *StringSourceSpec { + if in == nil { + return nil + } + out := new(StringSourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSProfileSpec) DeepCopyInto(out *TLSProfileSpec) { + *out = *in + if in.Ciphers != nil { + in, out := &in.Ciphers, &out.Ciphers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSProfileSpec. +func (in *TLSProfileSpec) DeepCopy() *TLSProfileSpec { + if in == nil { + return nil + } + out := new(TLSProfileSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSSecurityProfile) DeepCopyInto(out *TLSSecurityProfile) { + *out = *in + if in.Old != nil { + in, out := &in.Old, &out.Old + *out = new(OldTLSProfile) + **out = **in + } + if in.Intermediate != nil { + in, out := &in.Intermediate, &out.Intermediate + *out = new(IntermediateTLSProfile) + **out = **in + } + if in.Modern != nil { + in, out := &in.Modern, &out.Modern + *out = new(ModernTLSProfile) + **out = **in + } + if in.Custom != nil { + in, out := &in.Custom, &out.Custom + *out = new(CustomTLSProfile) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSSecurityProfile. +func (in *TLSSecurityProfile) DeepCopy() *TLSSecurityProfile { + if in == nil { + return nil + } + out := new(TLSSecurityProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateReference) DeepCopyInto(out *TemplateReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateReference. +func (in *TemplateReference) DeepCopy() *TemplateReference { + if in == nil { + return nil + } + out := new(TemplateReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenConfig) DeepCopyInto(out *TokenConfig) { + *out = *in + if in.AccessTokenInactivityTimeout != nil { + in, out := &in.AccessTokenInactivityTimeout, &out.AccessTokenInactivityTimeout + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenConfig. +func (in *TokenConfig) DeepCopy() *TokenConfig { + if in == nil { + return nil + } + out := new(TokenConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Update) DeepCopyInto(out *Update) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Update. +func (in *Update) DeepCopy() *Update { + if in == nil { + return nil + } + out := new(Update) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UpdateHistory) DeepCopyInto(out *UpdateHistory) { + *out = *in + in.StartedTime.DeepCopyInto(&out.StartedTime) + if in.CompletionTime != nil { + in, out := &in.CompletionTime, &out.CompletionTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateHistory. +func (in *UpdateHistory) DeepCopy() *UpdateHistory { + if in == nil { + return nil + } + out := new(UpdateHistory) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSpherePlatformSpec) DeepCopyInto(out *VSpherePlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformSpec. +func (in *VSpherePlatformSpec) DeepCopy() *VSpherePlatformSpec { + if in == nil { + return nil + } + out := new(VSpherePlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSpherePlatformStatus) DeepCopyInto(out *VSpherePlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformStatus. +func (in *VSpherePlatformStatus) DeepCopy() *VSpherePlatformStatus { + if in == nil { + return nil + } + out := new(VSpherePlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookTokenAuthenticator) DeepCopyInto(out *WebhookTokenAuthenticator) { + *out = *in + out.KubeConfig = in.KubeConfig + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookTokenAuthenticator. +func (in *WebhookTokenAuthenticator) DeepCopy() *WebhookTokenAuthenticator { + if in == nil { + return nil + } + out := new(WebhookTokenAuthenticator) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..326fc10bb --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,1859 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple lines or blocks that you want to ignore, use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_AdmissionConfig = map[string]string{ + "enabledPlugins": "enabledPlugins is a list of admission plugins that must be on in addition to the default list. Some admission plugins are disabled by default, but certain configurations require them. This is fairly uncommon and can result in performance penalties and unexpected behavior.", + "disabledPlugins": "disabledPlugins is a list of admission plugins that must be off. 
Putting something in this list is almost always a mistake and likely to result in cluster instability.", +} + +func (AdmissionConfig) SwaggerDoc() map[string]string { + return map_AdmissionConfig +} + +var map_AdmissionPluginConfig = map[string]string{ + "": "AdmissionPluginConfig holds the necessary configuration options for admission plugins", + "location": "Location is the path to a configuration file that contains the plugin's configuration", + "configuration": "Configuration is an embedded configuration object to be used as the plugin's configuration. If present, it will be used instead of the path to the configuration file.", +} + +func (AdmissionPluginConfig) SwaggerDoc() map[string]string { + return map_AdmissionPluginConfig +} + +var map_AuditConfig = map[string]string{ + "": "AuditConfig holds configuration for the audit capabilities", + "enabled": "If this flag is set, the audit log will be printed in the logs. The log contains the method, user, and requested URL.", + "auditFilePath": "All requests coming to the apiserver will be logged to this file.", + "maximumFileRetentionDays": "Maximum number of days to retain old log files based on the timestamp encoded in their filename.", + "maximumRetainedFiles": "Maximum number of old log files to retain.", + "maximumFileSizeMegabytes": "Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.", + "policyFile": "PolicyFile is a path to the file that defines the audit policy configuration.", + "policyConfiguration": "PolicyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. If present, it will be used instead of the path to the policy file.", + "logFormat": "Format of saved audits (legacy or json).", + "webHookKubeConfig": "Path to a .kubeconfig formatted file that defines the audit webhook configuration.", + "webHookMode": "Strategy for sending audit events (block or batch).", +} + +func (AuditConfig) SwaggerDoc() map[string]string { + return map_AuditConfig +} + +var map_CertInfo = map[string]string{ + "": "CertInfo relates a certificate with a private key", + "certFile": "CertFile is a file containing a PEM-encoded certificate", + "keyFile": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", +} + +func (CertInfo) SwaggerDoc() map[string]string { + return map_CertInfo +} + +var map_ClientConnectionOverrides = map[string]string{ + "acceptContentTypes": "acceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the default value of 'application/json'. This field will control all connections to the server used by a particular client.", + "contentType": "contentType is the content type used when sending data to the server from this client.", + "qps": "qps controls the number of queries per second allowed for this connection.", + "burst": "burst allows extra queries to accumulate when a client is exceeding its rate.", +} + +func (ClientConnectionOverrides) SwaggerDoc() map[string]string { + return map_ClientConnectionOverrides +} + +var map_ConfigMapFileReference = map[string]string{ + "": "ConfigMapFileReference references a config map in a specific namespace. The namespace must be specified at the point of use.", + "key": "Key allows pointing to a specific key/value inside of the configmap. 
This is useful for logical file references.", +} + +func (ConfigMapFileReference) SwaggerDoc() map[string]string { + return map_ConfigMapFileReference +} + +var map_ConfigMapNameReference = map[string]string{ + "": "ConfigMapNameReference references a config map in a specific namespace. The namespace must be specified at the point of use.", + "name": "name is the metadata.name of the referenced config map", +} + +func (ConfigMapNameReference) SwaggerDoc() map[string]string { + return map_ConfigMapNameReference +} + +var map_DelegatedAuthentication = map[string]string{ + "": "DelegatedAuthentication allows authentication to be disabled.", + "disabled": "disabled indicates that authentication should be disabled. By default it will use delegated authentication.", +} + +func (DelegatedAuthentication) SwaggerDoc() map[string]string { + return map_DelegatedAuthentication +} + +var map_DelegatedAuthorization = map[string]string{ + "": "DelegatedAuthorization allows authorization to be disabled.", + "disabled": "disabled indicates that authorization should be disabled. By default it will use delegated authorization.", +} + +func (DelegatedAuthorization) SwaggerDoc() map[string]string { + return map_DelegatedAuthorization +} + +var map_EtcdConnectionInfo = map[string]string{ + "": "EtcdConnectionInfo holds information necessary for connecting to an etcd server", + "urls": "URLs are the URLs for etcd", + "ca": "CA is a file containing trusted roots for the etcd server certificates", +} + +func (EtcdConnectionInfo) SwaggerDoc() map[string]string { + return map_EtcdConnectionInfo +} + +var map_EtcdStorageConfig = map[string]string{ + "storagePrefix": "StoragePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located.", +} + +func (EtcdStorageConfig) SwaggerDoc() map[string]string { + return map_EtcdStorageConfig +} + +var map_GenericAPIServerConfig = map[string]string{ + "": "GenericAPIServerConfig is an inline-able struct for aggregated apiservers that need to store data in etcd", + "servingInfo": "servingInfo describes how to start serving", + "corsAllowedOrigins": "corsAllowedOrigins", + "auditConfig": "auditConfig describes how to configure audit information", + "storageConfig": "storageConfig contains information about how to use", + "admission": "admissionConfig holds information about how to configure admission.", +} + +func (GenericAPIServerConfig) SwaggerDoc() map[string]string { + return map_GenericAPIServerConfig +} + +var map_GenericControllerConfig = map[string]string{ + "": "GenericControllerConfig provides information to configure a controller", + "servingInfo": "ServingInfo is the HTTP serving information for the controller's endpoints", + "leaderElection": "leaderElection provides information to elect a leader. Only override this if you have a specific need", + "authentication": "authentication allows configuration of authentication for the endpoints", + "authorization": "authorization allows configuration of authentication for the endpoints", +} + +func (GenericControllerConfig) SwaggerDoc() map[string]string { + return map_GenericControllerConfig +} + +var map_HTTPServingInfo = map[string]string{ + "": "HTTPServingInfo holds configuration for serving HTTP", + "maxRequestsInFlight": "MaxRequestsInFlight is the number of concurrent requests allowed to the server. 
If zero, no limit.", + "requestTimeoutSeconds": "RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.", +} + +func (HTTPServingInfo) SwaggerDoc() map[string]string { + return map_HTTPServingInfo +} + +var map_KubeClientConfig = map[string]string{ + "kubeConfig": "kubeConfig is a .kubeconfig filename for going to the owning kube-apiserver. Empty uses an in-cluster-config", + "connectionOverrides": "connectionOverrides specifies client overrides for system components to loop back to this master.", +} + +func (KubeClientConfig) SwaggerDoc() map[string]string { + return map_KubeClientConfig +} + +var map_LeaderElection = map[string]string{ + "": "LeaderElection provides information to elect a leader", + "disable": "disable allows leader election to be suspended while allowing a fully defaulted \"normal\" startup case.", + "namespace": "namespace indicates which namespace the resource is in", + "name": "name indicates what name to use for the resource", + "leaseDuration": "leaseDuration is the duration that non-leader candidates will wait after observing a leadership renewal until attempting to acquire leadership of a led but unrenewed leader slot. This is effectively the maximum duration that a leader can be stopped before it is replaced by another candidate. This is only applicable if leader election is enabled.", + "renewDeadline": "renewDeadline is the interval between attempts by the acting master to renew a leadership slot before it stops leading. This must be less than or equal to the lease duration. This is only applicable if leader election is enabled.", + "retryPeriod": "retryPeriod is the duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled.", +} + +func (LeaderElection) SwaggerDoc() map[string]string { + return map_LeaderElection +} + +var map_MaxAgePolicy = map[string]string{ + "": "MaxAgePolicy contains a numeric range for specifying a compliant HSTS max-age for the enclosing RequiredHSTSPolicy", + "largestMaxAge": "The largest allowed value (in seconds) of the RequiredHSTSPolicy max-age This value can be left unspecified, in which case no upper limit is enforced.", + "smallestMaxAge": "The smallest allowed value (in seconds) of the RequiredHSTSPolicy max-age Setting max-age=0 allows the deletion of an existing HSTS header from a host. This is a necessary tool for administrators to quickly correct mistakes. 
This value can be left unspecified, in which case no lower limit is enforced.", +} + +func (MaxAgePolicy) SwaggerDoc() map[string]string { + return map_MaxAgePolicy +} + +var map_NamedCertificate = map[string]string{ + "": "NamedCertificate specifies a certificate/key, and the names it should be served for", + "names": "Names is a list of DNS names this certificate should be used to secure. A name can be a normal DNS name, or can contain leading wildcard segments.", +} + +func (NamedCertificate) SwaggerDoc() map[string]string { + return map_NamedCertificate +} + +var map_RemoteConnectionInfo = map[string]string{ + "": "RemoteConnectionInfo holds information necessary for establishing a remote connection", + "url": "URL is the remote URL to connect to", + "ca": "CA is the CA for verifying TLS connections", +} + +func (RemoteConnectionInfo) SwaggerDoc() map[string]string { + return map_RemoteConnectionInfo +} + +var map_RequiredHSTSPolicy = map[string]string{ + "namespaceSelector": "namespaceSelector specifies a label selector such that the policy applies only to those routes that are in namespaces with labels that match the selector, and are in one of the DomainPatterns. Defaults to the empty LabelSelector, which matches everything.", + "domainPatterns": "domainPatterns is a list of domains for which the desired HSTS annotations are required. If domainPatterns is specified and a route is created with a spec.host matching one of the domains, the route must specify the HSTS Policy components described in the matching RequiredHSTSPolicy.\n\nThe use of wildcards is allowed like this: *.foo.com matches everything under foo.com. foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*.", + "maxAge": "maxAge is the delta time range in seconds during which hosts are regarded as HSTS hosts. If set to 0, it negates the effect, and hosts are removed as HSTS hosts. If set to 0 and includeSubdomains is specified, all subdomains of the host are also removed as HSTS hosts. maxAge is a time-to-live value, and if this policy is not refreshed on a client, the HSTS policy will eventually expire on that client.", + "preloadPolicy": "preloadPolicy directs the client to include hosts in its host preload list so that it never needs to do an initial load to get the HSTS header (note that this is not defined in RFC 6797 and is therefore client implementation-dependent).", + "includeSubDomainsPolicy": "includeSubDomainsPolicy means the HSTS Policy should apply to any subdomains of the host's domain name. Thus, for the host bar.foo.com, if includeSubDomainsPolicy was set to RequireIncludeSubDomains: - the host app.bar.foo.com would inherit the HSTS Policy of bar.foo.com - the host bar.foo.com would inherit the HSTS Policy of bar.foo.com - the host foo.com would NOT inherit the HSTS Policy of bar.foo.com - the host def.foo.com would NOT inherit the HSTS Policy of bar.foo.com", +} + +func (RequiredHSTSPolicy) SwaggerDoc() map[string]string { + return map_RequiredHSTSPolicy +} + +var map_SecretNameReference = map[string]string{ + "": "SecretNameReference references a secret in a specific namespace. 
The namespace must be specified at the point of use.", + "name": "name is the metadata.name of the referenced secret", +} + +func (SecretNameReference) SwaggerDoc() map[string]string { + return map_SecretNameReference +} + +var map_ServingInfo = map[string]string{ + "": "ServingInfo holds information about serving web pages", + "bindAddress": "BindAddress is the ip:port to serve on", + "bindNetwork": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", + "clientCA": "ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", + "namedCertificates": "NamedCertificates is a list of certificates to use to secure requests to specific hostnames", + "minTLSVersion": "MinTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", + "cipherSuites": "CipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", +} + +func (ServingInfo) SwaggerDoc() map[string]string { + return map_ServingInfo +} + +var map_StringSource = map[string]string{ + "": "StringSource allows specifying a string inline, or externally via env var or file. When it contains only a string value, it marshals to a simple JSON string.", +} + +func (StringSource) SwaggerDoc() map[string]string { + return map_StringSource +} + +var map_StringSourceSpec = map[string]string{ + "": "StringSourceSpec specifies a string value, or external location", + "value": "Value specifies the cleartext value, or an encrypted value if keyFile is specified.", + "env": "Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", + "file": "File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", + "keyFile": "KeyFile references a file containing the key to use to decrypt the value.", +} + +func (StringSourceSpec) SwaggerDoc() map[string]string { + return map_StringSourceSpec +} + +var map_APIServer = map[string]string{ + "": "APIServer holds configuration (like serving certificates, client CA and CORS domains) shared by all API servers in the system, among them especially kube-apiserver and openshift-apiserver. The canonical name of an instance is 'cluster'.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (APIServer) SwaggerDoc() map[string]string { + return map_APIServer +} + +var map_APIServerEncryption = map[string]string{ + "type": "type defines what encryption type should be used to encrypt resources at the datastore layer. When this field is unset (i.e. when it is set to the empty string), identity is implied. The behavior of unset can and will change over time. Even if encryption is enabled by default, the meaning of unset may change to a different encryption type based on changes in best practices.\n\nWhen encryption is enabled, all sensitive resources shipped with the platform are encrypted. This list of sensitive resources can and will change over time. The current authoritative list is:\n\n 1. secrets\n 2. configmaps\n 3. routes.route.openshift.io\n 4. oauthaccesstokens.oauth.openshift.io\n 5. 
oauthauthorizetokens.oauth.openshift.io", +} + +func (APIServerEncryption) SwaggerDoc() map[string]string { + return map_APIServerEncryption +} + +var map_APIServerList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (APIServerList) SwaggerDoc() map[string]string { + return map_APIServerList +} + +var map_APIServerNamedServingCert = map[string]string{ + "": "APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate.", + "names": "names is a optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates. Exact names trump over wildcard names. Explicit names defined here trump over extracted implicit names.", + "servingCertificate": "servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic. The secret must exist in the openshift-config namespace and contain the following required fields: - Secret.Data[\"tls.key\"] - TLS private key. - Secret.Data[\"tls.crt\"] - TLS certificate.", +} + +func (APIServerNamedServingCert) SwaggerDoc() map[string]string { + return map_APIServerNamedServingCert +} + +var map_APIServerServingCerts = map[string]string{ + "namedCertificates": "namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames. If no named certificates are provided, or no named certificates match the server name as understood by a client, the defaultServingCertificate will be used.", +} + +func (APIServerServingCerts) SwaggerDoc() map[string]string { + return map_APIServerServingCerts +} + +var map_APIServerSpec = map[string]string{ + "servingCerts": "servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic.", + "clientCA": "clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data[\"ca-bundle.crt\"] - CA bundle.", + "additionalCORSAllowedOrigins": "additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language.", + "encryption": "encryption allows the configuration of encryption of resources at the datastore layer.", + "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.\n\nIf unset, a default (which may change between releases) is chosen. 
Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12.", + "audit": "audit specifies the settings for audit configuration to be applied to all OpenShift-provided API servers in the cluster.", +} + +func (APIServerSpec) SwaggerDoc() map[string]string { + return map_APIServerSpec +} + +var map_Audit = map[string]string{ + "profile": "profile specifies the name of the desired top-level audit profile to be applied to all requests sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver, openshift-apiserver and oauth-apiserver), with the exception of those requests that match one or more of the customRules.\n\nThe following profiles are provided: - Default: default policy which means MetaData level logging with the exception of events\n (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody\n level).\n- WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens.\n\nWarning: It is not recommended to disable audit logging by using the `None` profile unless you are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues. If you disable audit logging and a support situation arises, you might need to enable audit logging and reproduce the issue in order to troubleshoot properly.\n\nIf unset, the 'Default' profile is used as the default.", + "customRules": "customRules specify profiles per group. These profile take precedence over the top-level profile field if they apply. They are evaluation from top to bottom and the first one that matches, applies.", +} + +func (Audit) SwaggerDoc() map[string]string { + return map_Audit +} + +var map_AuditCustomRule = map[string]string{ + "": "AuditCustomRule describes a custom rule for an audit profile that takes precedence over the top-level profile.", + "group": "group is a name of group a request user must be member of in order to this profile to apply.", + "profile": "profile specifies the name of the desired audit policy configuration to be deployed to all OpenShift-provided API servers in the cluster.\n\nThe following profiles are provided: - Default: the existing default policy. - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens.\n\nIf unset, the 'Default' profile is used as the default.", +} + +func (AuditCustomRule) SwaggerDoc() map[string]string { + return map_AuditCustomRule +} + +var map_Authentication = map[string]string{ + "": "Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. 
They may not be overridden.", +} + +func (Authentication) SwaggerDoc() map[string]string { + return map_Authentication +} + +var map_AuthenticationList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (AuthenticationList) SwaggerDoc() map[string]string { + return map_AuthenticationList +} + +var map_AuthenticationSpec = map[string]string{ + "type": "type identifies the cluster managed, user facing authentication mode in use. Specifically, it manages the component that responds to login attempts. The default is IntegratedOAuth.", + "oauthMetadata": "oauthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for an external OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 If oauthMetadata.name is non-empty, this value has precedence over any metadata reference stored in status. The key \"oauthMetadata\" is used to locate the data. If specified and the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. The namespace for this config map is openshift-config.", + "webhookTokenAuthenticators": "webhookTokenAuthenticators is DEPRECATED, setting it has no effect.", + "webhookTokenAuthenticator": "webhookTokenAuthenticator configures a remote token reviewer. These remote authentication webhooks can be used to verify bearer tokens via the tokenreviews.authentication.k8s.io REST API. This is required to honor bearer tokens that are provisioned by an external authentication service.", + "serviceAccountIssuer": "serviceAccountIssuer is the identifier of the bound service account token issuer. The default is https://kubernetes.default.svc WARNING: Updating this field will result in the invalidation of all bound tokens with the previous issuer value. Unless the holder of a bound token has explicit support for a change in issuer, they will not request a new bound token until pod restart or until their existing token exceeds 80% of its duration.", +} + +func (AuthenticationSpec) SwaggerDoc() map[string]string { + return map_AuthenticationSpec +} + +var map_AuthenticationStatus = map[string]string{ + "integratedOAuthMetadata": "integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 Authorization Server Metadata for the in-cluster integrated OAuth server. This discovery document can be viewed from its served location: oc get --raw '/.well-known/oauth-authorization-server' For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This contains the observed value based on cluster state. An explicitly set value in spec.oauthMetadata has precedence over this field. This field has no meaning if authentication spec.type is not set to IntegratedOAuth. The key \"oauthMetadata\" is used to locate the data. If the config map or expected key is not found, no metadata is served. If the specified metadata is not valid, no metadata is served. 
The namespace for this config map is openshift-config-managed.", +} + +func (AuthenticationStatus) SwaggerDoc() map[string]string { + return map_AuthenticationStatus +} + +var map_DeprecatedWebhookTokenAuthenticator = map[string]string{ + "": "deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field.", + "kubeConfig": "kubeConfig contains kube config file data which describes how to access the remote webhook service. For further details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication The key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored. The namespace for this secret is determined by the point of use.", +} + +func (DeprecatedWebhookTokenAuthenticator) SwaggerDoc() map[string]string { + return map_DeprecatedWebhookTokenAuthenticator +} + +var map_WebhookTokenAuthenticator = map[string]string{ + "": "webhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator", + "kubeConfig": "kubeConfig references a secret that contains kube config file data which describes how to access the remote webhook service. The namespace for the referenced secret is openshift-config.\n\nFor further details, see:\n\nhttps://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication\n\nThe key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored.", +} + +func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string { + return map_WebhookTokenAuthenticator +} + +var map_Build = map[string]string{ + "": "Build configures the behavior of OpenShift builds for the entire cluster. This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds.\n\nThe canonical name is \"cluster\"\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "Spec holds user-settable values for the build controller configuration", +} + +func (Build) SwaggerDoc() map[string]string { + return map_Build +} + +var map_BuildDefaults = map[string]string{ + "defaultProxy": "DefaultProxy contains the default proxy settings for all build operations, including image pull/push and source download.\n\nValues can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`, and `NO_PROXY` environment variables in the build config's strategy.", + "gitProxy": "GitProxy contains the proxy settings for git operations only. If set, this will override any Proxy settings for all git commands, such as git clone.\n\nValues that are not set here will be inherited from DefaultProxy.", + "env": "Env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build", + "imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. 
User can override a default label by providing a label with the same name in their Build/BuildConfig.", + "resources": "Resources defines resource requirements to execute the build.", +} + +func (BuildDefaults) SwaggerDoc() map[string]string { + return map_BuildDefaults +} + +var map_BuildList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (BuildList) SwaggerDoc() map[string]string { + return map_BuildList +} + +var map_BuildOverrides = map[string]string{ + "imageLabels": "ImageLabels is a list of docker labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.", + "nodeSelector": "NodeSelector is a selector which must be true for the build pod to fit on a node", + "tolerations": "Tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.", + "forcePull": "ForcePull overrides, if set, the equivalent value in the builds, i.e. false disables force pull for all builds, true enables force pull for all builds, independently of what each build specifies itself", +} + +func (BuildOverrides) SwaggerDoc() map[string]string { + return map_BuildOverrides +} + +var map_BuildSpec = map[string]string{ + "additionalTrustedCA": "AdditionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted for image pushes and pulls during builds. The namespace for this config map is openshift-config.\n\nDEPRECATED: Additional CAs for image pull and push should be set on image.config.openshift.io/cluster instead.", + "buildDefaults": "BuildDefaults controls the default information for Builds", + "buildOverrides": "BuildOverrides controls override settings for builds", +} + +func (BuildSpec) SwaggerDoc() map[string]string { + return map_BuildSpec +} + +var map_ImageLabel = map[string]string{ + "name": "Name defines the name of the label. It must have non-zero length.", + "value": "Value defines the literal value of the label.", +} + +func (ImageLabel) SwaggerDoc() map[string]string { + return map_ImageLabel +} + +var map_ClusterOperator = map[string]string{ + "": "ClusterOperator is the Custom Resource object which holds the current state of an operator. This object is used by operators to convey their state to the rest of the cluster.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds configuration that could apply to any operator.", + "status": "status holds the information about the state of an operator. 
It is consistent with status information across the Kubernetes ecosystem.", +} + +func (ClusterOperator) SwaggerDoc() map[string]string { + return map_ClusterOperator +} + +var map_ClusterOperatorList = map[string]string{ + "": "ClusterOperatorList is a list of OperatorStatus resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (ClusterOperatorList) SwaggerDoc() map[string]string { + return map_ClusterOperatorList +} + +var map_ClusterOperatorSpec = map[string]string{ + "": "ClusterOperatorSpec is empty for now, but you could imagine holding information like \"pause\".", +} + +func (ClusterOperatorSpec) SwaggerDoc() map[string]string { + return map_ClusterOperatorSpec +} + +var map_ClusterOperatorStatus = map[string]string{ + "": "ClusterOperatorStatus provides information about the status of the operator.", + "conditions": "conditions describes the state of the operator's managed and monitored components.", + "versions": "versions is a slice of operator and operand version tuples. Operators which manage multiple operands will have multiple operand entries in the array. Available operators must report the version of the operator itself with the name \"operator\". An operator reports a new \"operator\" version when it has rolled out the new version to all of its operands.", + "relatedObjects": "relatedObjects is a list of objects that are \"interesting\" or related to this operator. Common uses are: 1. the detailed resource driving the operator 2. operator namespaces 3. operand namespaces", + "extension": "extension contains any additional status information specific to the operator which owns this status object.", +} + +func (ClusterOperatorStatus) SwaggerDoc() map[string]string { + return map_ClusterOperatorStatus +} + +var map_ClusterOperatorStatusCondition = map[string]string{ + "": "ClusterOperatorStatusCondition represents the state of the operator's managed and monitored components.", + "type": "type specifies the aspect reported by this condition.", + "status": "status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "lastTransitionTime is the time of the last update to the current status property.", + "reason": "reason is the CamelCase reason for the condition's current status.", + "message": "message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines.", +} + +func (ClusterOperatorStatusCondition) SwaggerDoc() map[string]string { + return map_ClusterOperatorStatusCondition +} + +var map_ObjectReference = map[string]string{ + "": "ObjectReference contains enough information to let you inspect or modify the referred object.", + "group": "group of the referent.", + "resource": "resource of the referent.", + "namespace": "namespace of the referent.", + "name": "name of the referent.", +} + +func (ObjectReference) SwaggerDoc() map[string]string { + return map_ObjectReference +} + +var map_OperandVersion = map[string]string{ + "name": "name is the name of the particular operand this version is for. It usually matches container images, not operators.", + "version": "version indicates which version of a particular operand is currently being managed. It must always match the Available operand. 
If 1.0.0 is Available, then this must indicate 1.0.0 even if the operator is trying to rollout 1.1.0", +} + +func (OperandVersion) SwaggerDoc() map[string]string { + return map_OperandVersion +} + +var map_ClusterCondition = map[string]string{ + "": "ClusterCondition is a union of typed cluster conditions. The 'type' property determines which of the type-specific properties are relevant. When evaluated on a cluster, the condition may match, not match, or fail to evaluate.", + "type": "type represents the cluster-condition type. This defines the members and semantics of any additional properties.", + "promql": "promQL represents a cluster condition based on PromQL.", +} + +func (ClusterCondition) SwaggerDoc() map[string]string { + return map_ClusterCondition +} + +var map_ClusterVersion = map[string]string{ + "": "ClusterVersion is the configuration for the ClusterVersionOperator. This is where parameters related to automatic updates can be set.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec is the desired state of the cluster version - the operator will work to ensure that the desired version is applied to the cluster.", + "status": "status contains information about the available updates and any in-progress updates.", +} + +func (ClusterVersion) SwaggerDoc() map[string]string { + return map_ClusterVersion +} + +var map_ClusterVersionList = map[string]string{ + "": "ClusterVersionList is a list of ClusterVersion resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (ClusterVersionList) SwaggerDoc() map[string]string { + return map_ClusterVersionList +} + +var map_ClusterVersionSpec = map[string]string{ + "": "ClusterVersionSpec is the desired version state of the cluster. It includes the version the cluster should be at, how the cluster is identified, and where the cluster should look for version updates.", + "clusterID": "clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values). This is a required field.", + "desiredUpdate": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail. You may specify the version field without setting image if an update exists with that version in the availableUpdates or history.\n\nIf an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed.", + "upstream": "upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region.", + "channel": "channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will be contain stable updates that are appropriate for production clusters.", + "overrides": "overrides is list of overides for components that are managed by cluster version operator. 
Marking a component unmanaged will prevent the operator from creating or updating the object.", +} + +func (ClusterVersionSpec) SwaggerDoc() map[string]string { + return map_ClusterVersionSpec +} + +var map_ClusterVersionStatus = map[string]string{ + "": "ClusterVersionStatus reports the status of the cluster versioning, including any upgrades that are in progress. The current field will be set to whichever version the cluster is reconciling to, and the conditions array will report whether the update succeeded, is in progress, or is failing.", + "desired": "desired is the version that the cluster is reconciling towards. If the cluster is not yet fully initialized desired will be set with the information available, which may be an image or a tag.", + "history": "history contains a list of the most recent versions applied to the cluster. This value may be empty during cluster startup, and then will be updated when a new update is being applied. The newest update is first in the list and it is ordered by recency. Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved.", + "observedGeneration": "observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent a previous version.", + "versionHash": "versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only.", + "conditions": "conditions provides information about the cluster version. The condition \"Available\" is set to true if the desiredUpdate has been reached. The condition \"Progressing\" is set to true if an update is being applied. The condition \"Degraded\" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation.", + "availableUpdates": "availableUpdates contains updates recommended for this cluster. Updates which appear in conditionalUpdates but not in availableUpdates may expose this cluster to known issues. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified.", + "conditionalUpdates": "conditionalUpdates contains the list of updates that may be recommended for this cluster if it meets specific required conditions. Consumers interested in the set of updates that are actually recommended for this cluster should use availableUpdates. This list may be empty if no updates are recommended, if the update service is unavailable, or if an empty or invalid channel has been specified.", +} + +func (ClusterVersionStatus) SwaggerDoc() map[string]string { + return map_ClusterVersionStatus +} + +var map_ComponentOverride = map[string]string{ + "": "ComponentOverride allows overriding cluster version operator's behavior for a component.", + "kind": "kind indentifies which object to override.", + "group": "group identifies the API group that the kind is in.", + "namespace": "namespace is the component's namespace. If the resource is cluster scoped, the namespace should be empty.", + "name": "name is the component's name.", + "unmanaged": "unmanaged controls if cluster version operator should stop managing the resources in this cluster. 
Default: false", +} + +func (ComponentOverride) SwaggerDoc() map[string]string { + return map_ComponentOverride +} + +var map_ConditionalUpdate = map[string]string{ + "": "ConditionalUpdate represents an update which is recommended to some clusters on the version the current cluster is reconciling, but which may not be recommended for the current cluster.", + "release": "release is the target of the update.", + "risks": "risks represents the range of issues associated with updating to the target release. The cluster-version operator will evaluate all entries, and only recommend the update if there is at least one entry and all entries recommend the update.", + "conditions": "conditions represents the observations of the conditional update's current status. Known types are: * Evaluating, for whether the cluster-version operator will attempt to evaluate any risks[].matchingRules. * Recommended, for whether the update is recommended for the current cluster.", +} + +func (ConditionalUpdate) SwaggerDoc() map[string]string { + return map_ConditionalUpdate +} + +var map_ConditionalUpdateRisk = map[string]string{ + "": "ConditionalUpdateRisk represents a reason and cluster-state for not recommending a conditional update.", + "url": "url contains information about this risk.", + "name": "name is the CamelCase reason for not recommending a conditional update, in the event that matchingRules match the cluster state.", + "message": "message provides additional information about the risk of updating, in the event that matchingRules match the cluster state. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines.", + "matchingRules": "matchingRules is a slice of conditions for deciding which clusters match the risk and which do not. The slice is ordered by decreasing precedence. The cluster-version operator will walk the slice in order, and stop after the first it can successfully evaluate. If no condition can be successfully evaluated, the update will not be recommended.", +} + +func (ConditionalUpdateRisk) SwaggerDoc() map[string]string { + return map_ConditionalUpdateRisk +} + +var map_PromQLClusterCondition = map[string]string{ + "": "PromQLClusterCondition represents a cluster condition based on PromQL.", + "promql": "PromQL is a PromQL query classifying clusters. This query query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are evaluation failures.", +} + +func (PromQLClusterCondition) SwaggerDoc() map[string]string { + return map_PromQLClusterCondition +} + +var map_Release = map[string]string{ + "": "Release represents an OpenShift release image and associated metadata.", + "version": "version is a semantic versioning identifying the update version. When this field is part of spec, version is optional if image is specified.", + "image": "image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version.", + "url": "url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. 
The URL field may not be set for test or nightly releases.", + "channels": "channels is the set of Cincinnati channels to which the release currently belongs.", +} + +func (Release) SwaggerDoc() map[string]string { + return map_Release +} + +var map_Update = map[string]string{ + "": "Update represents an administrator update request.", + "version": "version is a semantic versioning identifying the update version. When this field is part of spec, version is optional if image is specified.", + "image": "image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version.", + "force": "force allows an administrator to update to an image that has failed verification or upgradeable checks. This option should only be used when the authenticity of the provided image has been verified out of band because the provided image will run with full administrative access to the cluster. Do not use this flag with images that comes from unknown or potentially malicious sources.", +} + +func (Update) SwaggerDoc() map[string]string { + return map_Update +} + +var map_UpdateHistory = map[string]string{ + "": "UpdateHistory is a single attempted update to the cluster.", + "state": "state reflects whether the update was fully applied. The Partial state indicates the update is not fully applied, while the Completed state indicates the update was successfully rolled out at least once (all parts of the update successfully applied).", + "startedTime": "startedTime is the time at which the update was started.", + "completionTime": "completionTime, if set, is when the update was fully applied. The update that is currently being applied will have a null completion time. Completion time will always be set for entries that are not the current update (usually to the started time of the next update).", + "version": "version is a semantic versioning identifying the update version. If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty.", + "image": "image is a container image location that contains the update. This value is always populated.", + "verified": "verified indicates whether the provided update was properly verified before it was installed. If this is false the cluster may not be trusted. Verified does not cover upgradeable checks that depend on the cluster state at the time when the update target was accepted.", + "acceptedRisks": "acceptedRisks records risks which were accepted to initiate the update. For example, it may menition an Upgradeable=False or missing signature that was overriden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets.", +} + +func (UpdateHistory) SwaggerDoc() map[string]string { + return map_UpdateHistory +} + +var map_Console = map[string]string{ + "": "Console holds cluster-wide configuration for the web console, including the logout URL, and reports the public URL of the console. The canonical name is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. 
They may not be overridden.", +} + +func (Console) SwaggerDoc() map[string]string { + return map_Console +} + +var map_ConsoleAuthentication = map[string]string{ + "": "ConsoleAuthentication defines a list of optional configuration for console authentication.", + "logoutRedirect": "An optional, absolute URL to redirect web browsers to after logging out of the console. If not specified, it will redirect to the default login page. This is required when using an identity provider that supports single sign-on (SSO) such as: - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, SAML) - OAuth (GitHub, GitLab, Google) Logging out of the console will destroy the user's token. The logoutRedirect provides the user the option to perform single logout (SLO) through the identity provider to destroy their single sign-on session.", +} + +func (ConsoleAuthentication) SwaggerDoc() map[string]string { + return map_ConsoleAuthentication +} + +var map_ConsoleList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (ConsoleList) SwaggerDoc() map[string]string { + return map_ConsoleList +} + +var map_ConsoleSpec = map[string]string{ + "": "ConsoleSpec is the specification of the desired behavior of the Console.", +} + +func (ConsoleSpec) SwaggerDoc() map[string]string { + return map_ConsoleSpec +} + +var map_ConsoleStatus = map[string]string{ + "": "ConsoleStatus defines the observed status of the Console.", + "consoleURL": "The URL for the console. This will be derived from the host for the route that is created for the console.", +} + +func (ConsoleStatus) SwaggerDoc() map[string]string { + return map_ConsoleStatus +} + +var map_DNS = map[string]string{ + "": "DNS holds cluster-wide information about DNS. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (DNS) SwaggerDoc() map[string]string { + return map_DNS +} + +var map_DNSList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (DNSList) SwaggerDoc() map[string]string { + return map_DNSList +} + +var map_DNSSpec = map[string]string{ + "baseDomain": "baseDomain is the base domain of the cluster. All managed DNS records will be sub-domains of this base.\n\nFor example, given the base domain `openshift.example.com`, an API server DNS record may be created for `cluster-api.openshift.example.com`.\n\nOnce set, this field cannot be changed.", + "publicZone": "publicZone is the location where all the DNS records that are publicly accessible to the internet exist.\n\nIf this field is nil, no public records should be created.\n\nOnce set, this field cannot be changed.", + "privateZone": "privateZone is the location where all the DNS records that are only available internally to the cluster exist.\n\nIf this field is nil, no private records should be created.\n\nOnce set, this field cannot be changed.", +} + +func (DNSSpec) SwaggerDoc() map[string]string { + return map_DNSSpec +} + +var map_DNSZone = map[string]string{ + "": "DNSZone is used to define a DNS hosted zone. 
A zone can be identified by an ID or tags.", + "id": "id is the identifier that can be used to find the DNS hosted zone.\n\non AWS zone can be fetched using `ID` as id in [1] on Azure zone can be fetched using `ID` as a pre-determined name in [2], on GCP zone can be fetched using `ID` as a pre-determined name in [3].\n\n[1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get", + "tags": "tags can be used to query the DNS hosted zone.\n\non AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters,\n\n[1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options", +} + +func (DNSZone) SwaggerDoc() map[string]string { + return map_DNSZone +} + +var map_CustomFeatureGates = map[string]string{ + "enabled": "enabled is a list of all feature gates that you want to force on", + "disabled": "disabled is a list of all feature gates that you want to force off", +} + +func (CustomFeatureGates) SwaggerDoc() map[string]string { + return map_CustomFeatureGates +} + +var map_FeatureGate = map[string]string{ + "": "Feature holds cluster-wide information about feature gates. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (FeatureGate) SwaggerDoc() map[string]string { + return map_FeatureGate +} + +var map_FeatureGateList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (FeatureGateList) SwaggerDoc() map[string]string { + return map_FeatureGateList +} + +var map_FeatureGateSelection = map[string]string{ + "featureSet": "featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting. Turning on or off features may cause irreversible changes in your cluster which cannot be undone.", + "customNoUpgrade": "customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations your cluster may fail in an unrecoverable way. featureSet must equal \"CustomNoUpgrade\" must be set to use this field.", +} + +func (FeatureGateSelection) SwaggerDoc() map[string]string { + return map_FeatureGateSelection +} + +var map_Image = map[string]string{ + "": "Image governs policies related to imagestream imports and runtime configuration for external registries. It allows cluster admins to configure which registries OpenShift is allowed to import images from, extra CA trust bundles for external registries, and policies to block or allow registry hostnames. 
When exposing OpenShift's image registry to the public, this also lets cluster admins specify the external hostname.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Image) SwaggerDoc() map[string]string { + return map_Image +} + +var map_ImageList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (ImageList) SwaggerDoc() map[string]string { + return map_ImageList +} + +var map_ImageSpec = map[string]string{ + "allowedRegistriesForImport": "allowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.", + "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", + "additionalTrustedCA": "additionalTrustedCA is a reference to a ConfigMap containing additional CAs that should be trusted during imagestream import, pod image pull, build image pull, and imageregistry pullthrough. The namespace for this config map is openshift-config.", + "registrySources": "registrySources contains configuration that determines how the container runtime should treat individual registries when accessing images for builds+pods. (e.g. whether or not to allow insecure access). It does not contain configuration for the internal cluster registry.", +} + +func (ImageSpec) SwaggerDoc() map[string]string { + return map_ImageSpec +} + +var map_ImageStatus = map[string]string{ + "internalRegistryHostname": "internalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. This value is set by the image registry operator which controls the internal registry hostname. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable.", + "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", +} + +func (ImageStatus) SwaggerDoc() map[string]string { + return map_ImageStatus +} + +var map_RegistryLocation = map[string]string{ + "": "RegistryLocation contains a location of the registry specified by the registry domain name. 
The domain name might include wildcards, like '*' or '??'.", + "domainName": "domainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.", + "insecure": "insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.", +} + +func (RegistryLocation) SwaggerDoc() map[string]string { + return map_RegistryLocation +} + +var map_RegistrySources = map[string]string{ + "": "RegistrySources holds cluster-wide information about how to handle the registries config.", + "insecureRegistries": "insecureRegistries are registries which do not have a valid TLS certificates or only support HTTP connections.", + "blockedRegistries": "blockedRegistries cannot be used for image pull and push actions. All other registries are permitted.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.", + "allowedRegistries": "allowedRegistries are the only registries permitted for image pull and push actions. All other registries are denied.\n\nOnly one of BlockedRegistries or AllowedRegistries may be set.", + "containerRuntimeSearchRegistries": "containerRuntimeSearchRegistries are registries that will be searched when pulling images that do not have fully qualified domains in their pull specs. Registries will be searched in the order provided in the list. Note: this search list only works with the container runtime, i.e CRI-O. Will NOT work with builds or imagestream imports.", +} + +func (RegistrySources) SwaggerDoc() map[string]string { + return map_RegistrySources +} + +var map_ImageContentPolicy = map[string]string{ + "": "ImageContentPolicy holds cluster-wide information about how to handle registry mirror rules. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", +} + +func (ImageContentPolicy) SwaggerDoc() map[string]string { + return map_ImageContentPolicy +} + +var map_ImageContentPolicyList = map[string]string{ + "": "ImageContentPolicyList lists the items in the ImageContentPolicy CRD.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (ImageContentPolicyList) SwaggerDoc() map[string]string { + return map_ImageContentPolicyList +} + +var map_ImageContentPolicySpec = map[string]string{ + "": "ImageContentPolicySpec is the specification of the ImageContentPolicy CRD.", + "repositoryDigestMirrors": "repositoryDigestMirrors allows images referenced by image digests in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in RepositoryDigestMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. 
To pull image from mirrors by tags, should set the \"allowMirrorByTags\".\n\nEach “source” repository is treated independently; configurations for different “source” repositories don’t interact.\n\nIf the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec.\n\nWhen multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified.", +} + +func (ImageContentPolicySpec) SwaggerDoc() map[string]string { + return map_ImageContentPolicySpec +} + +var map_RepositoryDigestMirrors = map[string]string{ + "": "RepositoryDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config.", + "source": "source is the repository that users refer to, e.g. in image pull specifications.", + "allowMirrorByTags": "allowMirrorByTags if true, the mirrors can be used to pull the images that are referenced by their tags. Default is false, the mirrors only work when pulling the images that are referenced by their digests. Pulling images by tag can potentially yield different images, depending on which endpoint we pull from. Forcing digest-pulls for mirrors avoids that issue.", + "mirrors": "mirrors is zero or more repositories that may also contain the same images. If the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec. No mirror will be configured. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. Other cluster configuration, including (but not limited to) other repositoryDigestMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering.", +} + +func (RepositoryDigestMirrors) SwaggerDoc() map[string]string { + return map_RepositoryDigestMirrors +} + +var map_AWSPlatformSpec = map[string]string{ + "": "AWSPlatformSpec holds the desired state of the Amazon Web Services infrastructure provider. This only includes fields that can be modified in the cluster.", + "serviceEndpoints": "serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service.", +} + +func (AWSPlatformSpec) SwaggerDoc() map[string]string { + return map_AWSPlatformSpec +} + +var map_AWSPlatformStatus = map[string]string{ + "": "AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.", + "region": "region holds the default AWS region for new AWS resources created by the cluster.", + "serviceEndpoints": "ServiceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service.", + "resourceTags": "resourceTags is a list of additional tags to apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. 
OpenShift reserves 25 tags for its use, leaving 25 tags available for the user.", +} + +func (AWSPlatformStatus) SwaggerDoc() map[string]string { + return map_AWSPlatformStatus +} + +var map_AWSResourceTag = map[string]string{ + "": "AWSResourceTag is a tag to apply to AWS resources created for the cluster.", + "key": "key is the key of the tag", + "value": "value is the value of the tag. Some AWS service do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services.", +} + +func (AWSResourceTag) SwaggerDoc() map[string]string { + return map_AWSResourceTag +} + +var map_AWSServiceEndpoint = map[string]string{ + "": "AWSServiceEndpoint store the configuration of a custom url to override existing defaults of AWS Services.", + "name": "name is the name of the AWS service. The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html This must be provided and cannot be empty.", + "url": "url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty.", +} + +func (AWSServiceEndpoint) SwaggerDoc() map[string]string { + return map_AWSServiceEndpoint +} + +var map_AlibabaCloudPlatformSpec = map[string]string{ + "": "AlibabaCloudPlatformSpec holds the desired state of the Alibaba Cloud infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (AlibabaCloudPlatformSpec) SwaggerDoc() map[string]string { + return map_AlibabaCloudPlatformSpec +} + +var map_AlibabaCloudPlatformStatus = map[string]string{ + "": "AlibabaCloudPlatformStatus holds the current status of the Alibaba Cloud infrastructure provider.", + "region": "region specifies the region for Alibaba Cloud resources created for the cluster.", + "resourceGroupID": "resourceGroupID is the ID of the resource group for the cluster.", + "resourceTags": "resourceTags is a list of additional tags to apply to Alibaba Cloud resources created for the cluster.", +} + +func (AlibabaCloudPlatformStatus) SwaggerDoc() map[string]string { + return map_AlibabaCloudPlatformStatus +} + +var map_AlibabaCloudResourceTag = map[string]string{ + "": "AlibabaCloudResourceTag is the set of tags to add to apply to resources.", + "key": "key is the key of the tag.", + "value": "value is the value of the tag.", +} + +func (AlibabaCloudResourceTag) SwaggerDoc() map[string]string { + return map_AlibabaCloudResourceTag +} + +var map_AzurePlatformSpec = map[string]string{ + "": "AzurePlatformSpec holds the desired state of the Azure infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (AzurePlatformSpec) SwaggerDoc() map[string]string { + return map_AzurePlatformSpec +} + +var map_AzurePlatformStatus = map[string]string{ + "": "AzurePlatformStatus holds the current status of the Azure infrastructure provider.", + "resourceGroupName": "resourceGroupName is the Resource Group for new Azure resources created for the cluster.", + "networkResourceGroupName": "networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. If empty, the value is same as ResourceGroupName.", + "cloudName": "cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK with the appropriate Azure API endpoints. 
If empty, the value is equal to `AzurePublicCloud`.", + "armEndpoint": "armEndpoint specifies a URL to use for resource management in non-soverign clouds such as Azure Stack.", +} + +func (AzurePlatformStatus) SwaggerDoc() map[string]string { + return map_AzurePlatformStatus +} + +var map_BareMetalPlatformSpec = map[string]string{ + "": "BareMetalPlatformSpec holds the desired state of the BareMetal infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (BareMetalPlatformSpec) SwaggerDoc() map[string]string { + return map_BareMetalPlatformSpec +} + +var map_BareMetalPlatformStatus = map[string]string{ + "": "BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider. For more information about the network architecture used with the BareMetal platform type, see: https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", + "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for BareMetal deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", +} + +func (BareMetalPlatformStatus) SwaggerDoc() map[string]string { + return map_BareMetalPlatformStatus +} + +var map_EquinixMetalPlatformSpec = map[string]string{ + "": "EquinixMetalPlatformSpec holds the desired state of the Equinix Metal infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (EquinixMetalPlatformSpec) SwaggerDoc() map[string]string { + return map_EquinixMetalPlatformSpec +} + +var map_EquinixMetalPlatformStatus = map[string]string{ + "": "EquinixMetalPlatformStatus holds the current status of the Equinix Metal infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", +} + +func (EquinixMetalPlatformStatus) SwaggerDoc() map[string]string { + return map_EquinixMetalPlatformStatus +} + +var map_GCPPlatformSpec = map[string]string{ + "": "GCPPlatformSpec holds the desired state of the Google Cloud Platform infrastructure provider. 
This only includes fields that can be modified in the cluster.", +} + +func (GCPPlatformSpec) SwaggerDoc() map[string]string { + return map_GCPPlatformSpec +} + +var map_GCPPlatformStatus = map[string]string{ + "": "GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.", + "projectID": "resourceGroupName is the Project ID for new GCP resources created for the cluster.", + "region": "region holds the region for new GCP resources created for the cluster.", +} + +func (GCPPlatformStatus) SwaggerDoc() map[string]string { + return map_GCPPlatformStatus +} + +var map_IBMCloudPlatformSpec = map[string]string{ + "": "IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (IBMCloudPlatformSpec) SwaggerDoc() map[string]string { + return map_IBMCloudPlatformSpec +} + +var map_IBMCloudPlatformStatus = map[string]string{ + "": "IBMCloudPlatformStatus holds the current status of the IBMCloud infrastructure provider.", + "location": "Location is where the cluster has been deployed", + "resourceGroupName": "ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster.", + "providerType": "ProviderType indicates the type of cluster that was created", + "cisInstanceCRN": "CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", +} + +func (IBMCloudPlatformStatus) SwaggerDoc() map[string]string { + return map_IBMCloudPlatformStatus +} + +var map_Infrastructure = map[string]string{ + "": "Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Infrastructure) SwaggerDoc() map[string]string { + return map_Infrastructure +} + +var map_InfrastructureList = map[string]string{ + "": "InfrastructureList is\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (InfrastructureList) SwaggerDoc() map[string]string { + return map_InfrastructureList +} + +var map_InfrastructureSpec = map[string]string{ + "": "InfrastructureSpec contains settings that apply to the cluster infrastructure.", + "cloudConfig": "cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file. This configuration file is used to configure the Kubernetes cloud provider integration when using the built-in cloud provider integration or the external cloud controller manager. The namespace for this config map is openshift-config.\n\ncloudConfig should only be consumed by the kube_cloud_config controller. The controller is responsible for using the user configuration in the spec for various platforms and combining that with the user provided ConfigMap in this field to create a stitched kube cloud config. The controller generates a ConfigMap `kube-cloud-config` in `openshift-config-managed` namespace with the kube cloud config is stored in `cloud.conf` key. 
All the clients are expected to use the generated ConfigMap only.", + "platformSpec": "platformSpec holds desired information specific to the underlying infrastructure provider.", +} + +func (InfrastructureSpec) SwaggerDoc() map[string]string { + return map_InfrastructureSpec +} + +var map_InfrastructureStatus = map[string]string{ + "": "InfrastructureStatus describes the infrastructure the cluster is leveraging.", + "infrastructureName": "infrastructureName uniquely identifies a cluster with a human friendly name. Once set it should not be changed. Must be of max length 27 and must have only alphanumeric or hyphen characters.", + "platform": "platform is the underlying infrastructure provider for the cluster.\n\nDeprecated: Use platformStatus.type instead.", + "platformStatus": "platformStatus holds status information specific to the underlying infrastructure provider.", + "etcdDiscoveryDomain": "etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release.", + "apiServerURL": "apiServerURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerURL can be used by components like the web console to tell users where to find the Kubernetes API.", + "apiServerInternalURI": "apiServerInternalURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components like kubelets, to contact the Kubernetes API server using the infrastructure provider rather than Kubernetes networking.", + "controlPlaneTopology": "controlPlaneTopology expresses the expectations for operands that normally run on control nodes. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster.", + "infrastructureTopology": "infrastructureTopology expresses the expectations for infrastructure services that do not run on control plane nodes, usually indicated by a node selector for a `role` value other than `master`. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation NOTE: External topology mode is not applicable for this field.", +} + +func (InfrastructureStatus) SwaggerDoc() map[string]string { + return map_InfrastructureStatus +} + +var map_KubevirtPlatformSpec = map[string]string{ + "": "KubevirtPlatformSpec holds the desired state of the kubevirt infrastructure provider. 
This only includes fields that can be modified in the cluster.", +} + +func (KubevirtPlatformSpec) SwaggerDoc() map[string]string { + return map_KubevirtPlatformSpec +} + +var map_KubevirtPlatformStatus = map[string]string{ + "": "KubevirtPlatformStatus holds the current status of the kubevirt infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", +} + +func (KubevirtPlatformStatus) SwaggerDoc() map[string]string { + return map_KubevirtPlatformStatus +} + +var map_OpenStackPlatformSpec = map[string]string{ + "": "OpenStackPlatformSpec holds the desired state of the OpenStack infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (OpenStackPlatformSpec) SwaggerDoc() map[string]string { + return map_OpenStackPlatformSpec +} + +var map_OpenStackPlatformStatus = map[string]string{ + "": "OpenStackPlatformStatus holds the current status of the OpenStack infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.", + "cloudName": "cloudName is the name of the desired OpenStack cloud in the client configuration file (`clouds.yaml`).", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", + "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for OpenStack deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", +} + +func (OpenStackPlatformStatus) SwaggerDoc() map[string]string { + return map_OpenStackPlatformStatus +} + +var map_OvirtPlatformSpec = map[string]string{ + "": "OvirtPlatformSpec holds the desired state of the oVirt infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (OvirtPlatformSpec) SwaggerDoc() map[string]string { + return map_OvirtPlatformSpec +} + +var map_OvirtPlatformStatus = map[string]string{ + "": "OvirtPlatformStatus holds the current status of the oVirt infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. 
It is the IP for a self-hosted load balancer in front of the API servers.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", + "nodeDNSIP": "deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release.", +} + +func (OvirtPlatformStatus) SwaggerDoc() map[string]string { + return map_OvirtPlatformStatus +} + +var map_PlatformSpec = map[string]string{ + "": "PlatformSpec holds the desired state specific to the underlying infrastructure provider of the current cluster. Since these are used at spec-level for the underlying cluster, it is expected that only one of the spec structs is set.", + "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"KubeVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.", + "aws": "AWS contains settings specific to the Amazon Web Services infrastructure provider.", + "azure": "Azure contains settings specific to the Azure infrastructure provider.", + "gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.", + "baremetal": "BareMetal contains settings specific to the BareMetal platform.", + "openstack": "OpenStack contains settings specific to the OpenStack infrastructure provider.", + "ovirt": "Ovirt contains settings specific to the oVirt infrastructure provider.", + "vsphere": "VSphere contains settings specific to the VSphere infrastructure provider.", + "ibmcloud": "IBMCloud contains settings specific to the IBMCloud infrastructure provider.", + "kubevirt": "Kubevirt contains settings specific to the kubevirt infrastructure provider.", + "equinixMetal": "EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.", + "powervs": "PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider.", + "alibabaCloud": "AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", +} + +func (PlatformSpec) SwaggerDoc() map[string]string { + return map_PlatformSpec +}
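+
+// Example (illustrative, not part of the generated file): a minimal
+// Infrastructure spec selecting the AWS provider via the type discriminator
+// documented above; per the Infrastructure type description, the canonical
+// resource name is `cluster`:
+//
+//	apiVersion: config.openshift.io/v1
+//	kind: Infrastructure
+//	metadata:
+//	  name: cluster
+//	spec:
+//	  platformSpec:
+//	    type: AWS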
+ +var map_PlatformStatus = map[string]string{ + "": "PlatformStatus holds the current status specific to the underlying infrastructure provider of the current cluster. Since these are used at status-level for the underlying cluster, it is expected that only one of the status structs is set.", + "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.\n\nThis value will be synced to the `status.platform` and `status.platformStatus.type`. Currently this value cannot be changed once set.", + "aws": "AWS contains settings specific to the Amazon Web Services infrastructure provider.", + "azure": "Azure contains settings specific to the Azure infrastructure provider.", + "gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.", + "baremetal": "BareMetal contains settings specific to the BareMetal platform.", + "openstack": "OpenStack contains settings specific to the OpenStack infrastructure provider.", + "ovirt": "Ovirt contains settings specific to the oVirt infrastructure provider.", + "vsphere": "VSphere contains settings specific to the VSphere infrastructure provider.", + "ibmcloud": "IBMCloud contains settings specific to the IBMCloud infrastructure provider.", + "kubevirt": "Kubevirt contains settings specific to the kubevirt infrastructure provider.", + "equinixMetal": "EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.", + "powervs": "PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider.", + "alibabaCloud": "AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", +} + +func (PlatformStatus) SwaggerDoc() map[string]string { + return map_PlatformStatus +} + +var map_PowerVSPlatformSpec = map[string]string{ + "": "PowerVSPlatformSpec holds the desired state of the IBM Power Systems Virtual Servers infrastructure provider. This only includes fields that can be modified in the cluster.", + "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service.", +} + +func (PowerVSPlatformSpec) SwaggerDoc() map[string]string { + return map_PowerVSPlatformSpec +} + +var map_PowerVSPlatformStatus = map[string]string{ + "": "PowerVSPlatformStatus holds the current status of the IBM Power Systems Virtual Servers infrastructure provider.", + "region": "region holds the default Power VS region for new Power VS resources created by the cluster.", + "zone": "zone holds the default zone for the new Power VS resources created by the cluster. Note: Currently only single-zone OCP clusters are supported", + "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service.", + "cisInstanceCRN": "CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", +} + +func (PowerVSPlatformStatus) SwaggerDoc() map[string]string { + return map_PowerVSPlatformStatus +}
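+
+// Example (illustrative; the override URL is hypothetical): a Power VS
+// serviceEndpoints entry as documented above, overriding the IAM endpoint:
+//
+//	serviceEndpoints:
+//	- name: IAM
+//	  url: https://iam.test.cloud.ibm.com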
+ +var map_PowerVSServiceEndpoint = map[string]string{ + "": "PowerVSServiceEndpoint stores the configuration of a custom url to override existing defaults of PowerVS Services.", + "name": "name is the name of the Power VS service. Some of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud", + "url": "url is a fully qualified URI with scheme https that overrides the default generated endpoint for a client. This must be provided and cannot be empty.", +} + +func (PowerVSServiceEndpoint) SwaggerDoc() map[string]string { + return map_PowerVSServiceEndpoint +} + +var map_VSpherePlatformSpec = map[string]string{ + "": "VSpherePlatformSpec holds the desired state of the vSphere infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (VSpherePlatformSpec) SwaggerDoc() map[string]string { + return map_VSpherePlatformSpec +} + +var map_VSpherePlatformStatus = map[string]string{ + "": "VSpherePlatformStatus holds the current status of the vSphere infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", + "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for vSphere deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", +} + +func (VSpherePlatformStatus) SwaggerDoc() map[string]string { + return map_VSpherePlatformStatus +} + +var map_ComponentRouteSpec = map[string]string{ + "": "ComponentRouteSpec allows for configuration of a route's hostname and serving certificate.", + "namespace": "namespace is the namespace of the route to customize.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized.", + "name": "name is the logical name of the route to customize.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of status.componentRoutes if the route is to be customized.", + "hostname": "hostname is the hostname that should be used by the route.", + "servingCertKeyPairSecret": "servingCertKeyPairSecret is a reference to a secret of type `kubernetes.io/tls` in the openshift-config namespace. The serving cert/key pair must match and will be used by the operator to fulfill the intent of serving with this name. If the custom hostname uses the default routing suffix of the cluster, the Secret specification for a serving certificate will not be needed.", +} + +func (ComponentRouteSpec) SwaggerDoc() map[string]string { + return map_ComponentRouteSpec +} + +var map_ComponentRouteStatus = map[string]string{ + "": "ComponentRouteStatus contains information allowing configuration of a route's hostname and serving certificate.", + "namespace": "namespace is the namespace of the route to customize. It must be a real namespace. Using an actual namespace ensures that no two components will conflict and the same component can be installed multiple times.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized.", + "name": "name is the logical name of the route to customize.
It does not have to be the actual name of a route resource but it cannot be renamed.\n\nThe namespace and name of this componentRoute must match a corresponding entry in the list of spec.componentRoutes if the route is to be customized.", + "defaultHostname": "defaultHostname is the hostname of this route prior to customization.", + "consumingUsers": "consumingUsers is a slice of ServiceAccounts that need to have read permission on the servingCertKeyPairSecret secret.", + "currentHostnames": "currentHostnames is the list of current names used by the route. Typically, this list should consist of a single hostname, but if multiple hostnames are supported by the route the operator may write multiple entries to this list.", + "conditions": "conditions are used to communicate the state of the componentRoutes entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf available is true, the content served by the route can be accessed by users. This includes cases where a default may continue to serve content while the customized route specified by the cluster-admin is being configured.\n\nIf Degraded is true, that means something has gone wrong trying to handle the componentRoutes entry. The currentHostnames field may or may not be in effect.\n\nIf Progressing is true, that means the component is taking some action related to the componentRoutes entry.", + "relatedObjects": "relatedObjects is a list of resources which are useful when debugging or inspecting how spec.componentRoutes is applied.", +} + +func (ComponentRouteStatus) SwaggerDoc() map[string]string { + return map_ComponentRouteStatus +} + +var map_Ingress = map[string]string{ + "": "Ingress holds cluster-wide information about ingress, including the default ingress domain used for routes. The canonical name is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Ingress) SwaggerDoc() map[string]string { + return map_Ingress +} + +var map_IngressList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (IngressList) SwaggerDoc() map[string]string { + return map_IngressList +} + +var map_IngressSpec = map[string]string{ + "domain": "domain is used to generate a default host name for a route when the route's host name is empty. The generated host name will follow this pattern: \"<route-name>.<route-namespace>.<domain>\".\n\nIt is also used as the default wildcard domain suffix for ingress. The default ingresscontroller domain will follow this pattern: \"*.<domain>\".\n\nOnce set, changing domain is not currently supported.", + "appsDomain": "appsDomain is an optional domain to use instead of the one specified in the domain field when a Route is created without specifying an explicit host. If appsDomain is nonempty, this value is used to generate default host values for Route. Unlike domain, appsDomain may be modified after installation. This assumes a new ingresscontroller has been set up with a wildcard certificate.", + "componentRoutes": "componentRoutes is an optional list of routes that are managed by OpenShift components that a cluster-admin is able to configure the hostname and serving certificate for.
The namespace and name of each route in this list should match an existing entry in the status.componentRoutes list.\n\nTo determine the set of configurable Routes, look at namespace and name of entries in the .status.componentRoutes list, where participating operators write the status of configurable routes.", + "requiredHSTSPolicies": "requiredHSTSPolicies specifies HSTS policies that are required to be set on newly created or updated routes matching the domainPattern/s and namespaceSelector/s that are specified in the policy. Each requiredHSTSPolicy must have at least a domainPattern and a maxAge to validate a route HSTS Policy route annotation, and affect route admission.\n\nA candidate route is checked for HSTS Policies if it has the HSTS Policy route annotation: \"haproxy.router.openshift.io/hsts_header\" E.g. haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains\n\n- For each candidate route, if it matches a requiredHSTSPolicy domainPattern and optional namespaceSelector, then the maxAge, preloadPolicy, and includeSubdomainsPolicy must be valid to be admitted. Otherwise, the route is rejected. - The first match, by domainPattern and optional namespaceSelector, in the ordering of the RequiredHSTSPolicies determines the route's admission status. - If the candidate route doesn't match any requiredHSTSPolicy domainPattern and optional namespaceSelector, then it may use any HSTS Policy annotation.\n\nThe HSTS policy configuration may be changed after routes have already been created. An update to a previously admitted route may then fail if the updated route does not conform to the updated HSTS policy configuration. However, changing the HSTS policy configuration will not cause a route that is already admitted to stop working.\n\nNote that if there are no RequiredHSTSPolicies, any HSTS Policy annotation on the route is valid.", +} + +func (IngressSpec) SwaggerDoc() map[string]string { + return map_IngressSpec +} + +var map_IngressStatus = map[string]string{ + "componentRoutes": "componentRoutes is where participating operators place the current route status for routes whose hostnames and serving certificates can be customized by the cluster-admin.", +} + +func (IngressStatus) SwaggerDoc() map[string]string { + return map_IngressStatus +} + +var map_ClusterNetworkEntry = map[string]string{ + "": "ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated.", + "cidr": "The complete block for pod IPs.", + "hostPrefix": "The size (prefix) of block to allocate to each node. If this field is not used by the plugin, it can be left unset.", +} + +func (ClusterNetworkEntry) SwaggerDoc() map[string]string { + return map_ClusterNetworkEntry +}
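+
+// Example (illustrative CIDR values): a clusterNetwork entry as documented
+// above, carving a /23 block per node out of a /14 pod network:
+//
+//	clusterNetwork:
+//	- cidr: 10.128.0.0/14
+//	  hostPrefix: 23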
+ +var map_ExternalIPConfig = map[string]string{ + "": "ExternalIPConfig specifies some IP blocks relevant for the ExternalIP field of a Service resource.", + "policy": "policy is a set of restrictions applied to the ExternalIP field. If nil or empty, then ExternalIP is not allowed to be set.", + "autoAssignCIDRs": "autoAssignCIDRs is a list of CIDRs from which to automatically assign Service.ExternalIP. These are assigned when the service is of type LoadBalancer. In general, this is only useful for bare-metal clusters. In OpenShift 3.x, this was misleadingly called \"IngressIPs\". Automatically assigned External IPs are not affected by any ExternalIPPolicy rules. Currently, only one entry may be provided.", +} + +func (ExternalIPConfig) SwaggerDoc() map[string]string { + return map_ExternalIPConfig +} + +var map_ExternalIPPolicy = map[string]string{ + "": "ExternalIPPolicy configures exactly which IPs are allowed for the ExternalIP field in a Service. If the zero struct is supplied, then none are permitted. The policy controller always allows automatically assigned external IPs.", + "allowedCIDRs": "allowedCIDRs is the list of allowed CIDRs.", + "rejectedCIDRs": "rejectedCIDRs is the list of disallowed CIDRs. These take precedence over allowedCIDRs.", +} + +func (ExternalIPPolicy) SwaggerDoc() map[string]string { + return map_ExternalIPPolicy +} + +var map_MTUMigration = map[string]string{ + "": "MTUMigration contains information about MTU migration.", + "network": "Network contains MTU migration configuration for the default network.", + "machine": "Machine contains MTU migration configuration for the machine's uplink.", +} + +func (MTUMigration) SwaggerDoc() map[string]string { + return map_MTUMigration +} + +var map_MTUMigrationValues = map[string]string{ + "": "MTUMigrationValues contains the values for a MTU migration.", + "to": "To is the MTU to migrate to.", + "from": "From is the MTU to migrate from.", +} + +func (MTUMigrationValues) SwaggerDoc() map[string]string { + return map_MTUMigrationValues +} + +var map_Network = map[string]string{ + "": "Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. Please view network.spec for an explanation on what applies when configuring this resource.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Network) SwaggerDoc() map[string]string { + return map_Network +} + +var map_NetworkList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (NetworkList) SwaggerDoc() map[string]string { + return map_NetworkList +} + +var map_NetworkMigration = map[string]string{ + "": "NetworkMigration represents the cluster network configuration.", + "networkType": "NetworkType is the target plugin that is to be deployed. Currently supported values are: OpenShiftSDN, OVNKubernetes", + "mtu": "MTU contains the MTU migration configuration.", +} + +func (NetworkMigration) SwaggerDoc() map[string]string { + return map_NetworkMigration +} + +var map_NetworkSpec = map[string]string{ + "": "NetworkSpec is the desired network configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.", + "clusterNetwork": "IP address pool to use for pod IPs.
This field is immutable after installation.", + "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here. This field is immutable after installation.", + "networkType": "NetworkType is the plugin that is to be deployed (e.g. OpenShiftSDN). This should match a value that the cluster-network-operator understands, or else no networking will be installed. Currently supported values are: - OpenShiftSDN This field is immutable after installation.", + "externalIP": "externalIP defines configuration for controllers that affect Service.ExternalIP. If nil, then ExternalIP is not allowed to be set.", + "serviceNodePortRange": "The port range allowed for Services of type NodePort. If not specified, the default of 30000-32767 will be used. Such Services without a NodePort specified will have one automatically allocated from this range. This parameter can be updated after the cluster is installed.", +} + +func (NetworkSpec) SwaggerDoc() map[string]string { + return map_NetworkSpec +} + +var map_NetworkStatus = map[string]string{ + "": "NetworkStatus is the current network configuration.", + "clusterNetwork": "IP address pool to use for pod IPs.", + "serviceNetwork": "IP address pool for services. Currently, we only support a single entry here.", + "networkType": "NetworkType is the plugin that is deployed (e.g. OpenShiftSDN).", + "clusterNetworkMTU": "ClusterNetworkMTU is the MTU for inter-pod networking.", + "migration": "Migration contains the cluster network migration configuration.", +} + +func (NetworkStatus) SwaggerDoc() map[string]string { + return map_NetworkStatus +} + +var map_BasicAuthIdentityProvider = map[string]string{ + "": "BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials", +} + +func (BasicAuthIdentityProvider) SwaggerDoc() map[string]string { + return map_BasicAuthIdentityProvider +} + +var map_GitHubIdentityProvider = map[string]string{ + "": "GitHubIdentityProvider provides identities for users authenticating using GitHub credentials", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.", + "organizations": "organizations optionally restricts which organizations are allowed to log in", + "teams": "teams optionally restricts which teams are allowed to log in. Format is <org>/<team>.", + "hostname": "hostname is the optional domain (e.g. \"mycompany.com\") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value configured at /setup/settings#hostname.", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value.
The namespace for this config map is openshift-config.", +} + +func (GitHubIdentityProvider) SwaggerDoc() map[string]string { + return map_GitHubIdentityProvider +} + +var map_GitLabIdentityProvider = map[string]string{ + "": "GitLabIdentityProvider provides identities for users authenticating using GitLab credentials", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.", + "url": "url is the oauth server base URL", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.", +} + +func (GitLabIdentityProvider) SwaggerDoc() map[string]string { + return map_GitLabIdentityProvider +} + +var map_GoogleIdentityProvider = map[string]string{ + "": "GoogleIdentityProvider provides identities for users authenticating using Google credentials", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.", + "hostedDomain": "hostedDomain is the optional Google App domain (e.g. \"mycompany.com\") to restrict logins to", +} + +func (GoogleIdentityProvider) SwaggerDoc() map[string]string { + return map_GoogleIdentityProvider +} + +var map_HTPasswdIdentityProvider = map[string]string{ + "": "HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials", + "fileData": "fileData is a required reference to a secret by name containing the data to use as the htpasswd file. The key \"htpasswd\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. If the specified htpasswd data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.", +} + +func (HTPasswdIdentityProvider) SwaggerDoc() map[string]string { + return map_HTPasswdIdentityProvider +}
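+
+// Example (illustrative; the provider and secret names are hypothetical): an
+// htpasswd identity provider entry in an OAuth spec, using the fileData
+// secret reference documented above:
+//
+//	identityProviders:
+//	- name: local-users
+//	  mappingMethod: claim
+//	  type: HTPasswd
+//	  htpasswd:
+//	    fileData:
+//	      name: htpasswd-secret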
+ +var map_IdentityProvider = map[string]string{ + "": "IdentityProvider provides identities for users authenticating using credentials", + "name": "name is used to qualify the identities returned by this provider. - It MUST be unique and not shared by any other identity provider used - It MUST be a valid path segment: name cannot equal \".\" or \"..\" or contain \"/\" or \"%\" or \":\"\n Ref: https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName", + "mappingMethod": "mappingMethod determines how identities from this provider are mapped to users. Defaults to \"claim\"", +} + +func (IdentityProvider) SwaggerDoc() map[string]string { + return map_IdentityProvider +} + +var map_IdentityProviderConfig = map[string]string{ + "": "IdentityProviderConfig contains configuration for using a specific identity provider", + "type": "type identifies the identity provider type for this entry.", + "basicAuth": "basicAuth contains configuration options for the BasicAuth IdP", + "github": "github enables user authentication using GitHub credentials", + "gitlab": "gitlab enables user authentication using GitLab credentials", + "google": "google enables user authentication using Google credentials", + "htpasswd": "htpasswd enables user authentication using an HTPasswd file to validate credentials", + "keystone": "keystone enables user authentication using keystone password credentials", + "ldap": "ldap enables user authentication using LDAP credentials", + "openID": "openID enables user authentication using OpenID credentials", + "requestHeader": "requestHeader enables user authentication using request header credentials", +} + +func (IdentityProviderConfig) SwaggerDoc() map[string]string { + return map_IdentityProviderConfig +} + +var map_KeystoneIdentityProvider = map[string]string{ + "": "KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials", + "domainName": "domainName is required for keystone v3", +} + +func (KeystoneIdentityProvider) SwaggerDoc() map[string]string { + return map_KeystoneIdentityProvider +} + +var map_LDAPAttributeMapping = map[string]string{ + "": "LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields", + "id": "id is the list of attributes whose values should be used as the user ID. Required. First non-empty attribute is used. At least one attribute is required. If none of the listed attributes have a value, authentication fails. LDAP standard identity attribute is \"dn\"", + "preferredUsername": "preferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is \"uid\"", + "name": "name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity. LDAP standard display name attribute is \"cn\"", + "email": "email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", +} + +func (LDAPAttributeMapping) SwaggerDoc() map[string]string { + return map_LDAPAttributeMapping +} + +var map_LDAPIdentityProvider = map[string]string{ + "": "LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials", + "url": "url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is: ldap://host:port/basedn?attribute?scope?filter", + "bindDN": "bindDN is an optional DN to bind with during the search phase.", + "bindPassword": "bindPassword is an optional reference to a secret by name containing a password to bind with during the search phase. The key \"bindPassword\" is used to locate the data.
If specified and the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.", + "insecure": "insecure, if true, indicates the connection should not use TLS. WARNING: Should not be set to `true` with the URL scheme \"ldaps://\" as \"ldaps://\" URLs always\n attempt to connect using TLS, even when `insecure` is set to `true`\nWhen `true`, \"ldap://\" URLs connect insecurely. When `false`, \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830.", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.", + "attributes": "attributes maps LDAP attributes to identities", +} + +func (LDAPIdentityProvider) SwaggerDoc() map[string]string { + return map_LDAPIdentityProvider +} + +var map_OAuth = map[string]string{ + "": "OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`. It is used to configure the integrated OAuth server. This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (OAuth) SwaggerDoc() map[string]string { + return map_OAuth +} + +var map_OAuthList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (OAuthList) SwaggerDoc() map[string]string { + return map_OAuthList +} + +var map_OAuthRemoteConnectionInfo = map[string]string{ + "": "OAuthRemoteConnectionInfo holds information necessary for establishing a remote connection", + "url": "url is the remote URL to connect to", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.", + "tlsClientCert": "tlsClientCert is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate to present when connecting to the server. The key \"tls.crt\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.", + "tlsClientKey": "tlsClientKey is an optional reference to a secret by name that contains the PEM-encoded TLS private key for the client certificate referenced in tlsClientCert.
The key \"tls.key\" is used to locate the data. If specified and the secret or expected key is not found, the identity provider is not honored. If the specified certificate data is not valid, the identity provider is not honored. The namespace for this secret is openshift-config.", +} + +func (OAuthRemoteConnectionInfo) SwaggerDoc() map[string]string { + return map_OAuthRemoteConnectionInfo +} + +var map_OAuthSpec = map[string]string{ + "": "OAuthSpec contains desired cluster auth configuration", + "identityProviders": "identityProviders is an ordered list of ways for a user to identify themselves. When this list is empty, no identities are provisioned for users.", + "tokenConfig": "tokenConfig contains options for authorization and access tokens", + "templates": "templates allow you to customize pages like the login page.", +} + +func (OAuthSpec) SwaggerDoc() map[string]string { + return map_OAuthSpec +} + +var map_OAuthStatus = map[string]string{ + "": "OAuthStatus shows current known state of OAuth server in the cluster", +} + +func (OAuthStatus) SwaggerDoc() map[string]string { + return map_OAuthStatus +} + +var map_OAuthTemplates = map[string]string{ + "": "OAuthTemplates allow for customization of pages like the login page", + "login": "login is the name of a secret that specifies a go template to use to render the login page. The key \"login.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default login page is used. If the specified template is not valid, the default login page is used. If unspecified, the default login page is used. The namespace for this secret is openshift-config.", + "providerSelection": "providerSelection is the name of a secret that specifies a go template to use to render the provider selection page. The key \"providers.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default provider selection page is used. If the specified template is not valid, the default provider selection page is used. If unspecified, the default provider selection page is used. The namespace for this secret is openshift-config.", + "error": "error is the name of a secret that specifies a go template to use to render error pages during the authentication or grant flow. The key \"errors.html\" is used to locate the template data. If specified and the secret or expected key is not found, the default error page is used. If the specified template is not valid, the default error page is used. If unspecified, the default error page is used. The namespace for this secret is openshift-config.", +} + +func (OAuthTemplates) SwaggerDoc() map[string]string { + return map_OAuthTemplates +} + +var map_OpenIDClaims = map[string]string{ + "": "OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider", + "preferredUsername": "preferredUsername is the list of claims whose values should be used as the preferred username. If unspecified, the preferred username is determined from the value of the sub claim", + "name": "name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity", + "email": "email is the list of claims whose values should be used as the email address. Optional. 
If unspecified, no email is set for the identity", + "groups": "groups is the list of claims whose values should be used to synchronize groups from the OIDC provider to OpenShift for the user. If multiple claims are specified, the first one with a non-empty value is used.", +} + +func (OpenIDClaims) SwaggerDoc() map[string]string { + return map_OpenIDClaims +} + +var map_OpenIDIdentityProvider = map[string]string{ + "": "OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is a required reference to the secret by name containing the oauth client secret. The key \"clientSecret\" is used to locate the data. If the secret or expected key is not found, the identity provider is not honored. The namespace for this secret is openshift-config.", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca.crt\" is used to locate the data. If specified and the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. If empty, the default system roots are used. The namespace for this config map is openshift-config.", + "extraScopes": "extraScopes are any scopes to request in addition to the standard \"openid\" scope.", + "extraAuthorizeParameters": "extraAuthorizeParameters are any custom parameters to add to the authorize request.", + "issuer": "issuer is the URL that the OpenID Provider asserts as its Issuer Identifier. It must use the https scheme with no query or fragment component.", + "claims": "claims mappings", +} + +func (OpenIDIdentityProvider) SwaggerDoc() map[string]string { + return map_OpenIDIdentityProvider +}
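+
+// Example (illustrative; the issuer URL, client ID and names are
+// hypothetical): an OpenID identity provider entry combining the
+// OpenIDIdentityProvider and OpenIDClaims fields documented above:
+//
+//	identityProviders:
+//	- name: corporate-oidc
+//	  mappingMethod: claim
+//	  type: OpenID
+//	  openID:
+//	    clientID: openshift-oauth
+//	    clientSecret:
+//	      name: oidc-client-secret
+//	    issuer: https://oidc.example.com
+//	    claims:
+//	      preferredUsername:
+//	      - preferred_username
+//	      email:
+//	      - email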
+ +var map_RequestHeaderIdentityProvider = map[string]string{ + "": "RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials", + "loginURL": "loginURL is a URL to redirect unauthenticated /authorize requests to. Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here. ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}\nRequired when login is set to true.", + "challengeURL": "challengeURL is a URL to redirect unauthenticated /authorize requests to. Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here. ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}\nRequired when challenge is set to true.", + "ca": "ca is a required reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. Specifically, it allows verification of incoming requests to prevent header spoofing. The key \"ca.crt\" is used to locate the data. If the config map or expected key is not found, the identity provider is not honored. If the specified ca data is not valid, the identity provider is not honored. The namespace for this config map is openshift-config.", + "clientCommonNames": "clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative.", + "headers": "headers is the set of headers to check for identity information", + "preferredUsernameHeaders": "preferredUsernameHeaders is the set of headers to check for the preferred username", + "nameHeaders": "nameHeaders is the set of headers to check for the display name", + "emailHeaders": "emailHeaders is the set of headers to check for the email address", +} + +func (RequestHeaderIdentityProvider) SwaggerDoc() map[string]string { + return map_RequestHeaderIdentityProvider +} + +var map_TokenConfig = map[string]string{ + "": "TokenConfig holds the necessary configuration options for authorization and access tokens", + "accessTokenMaxAgeSeconds": "accessTokenMaxAgeSeconds defines the maximum age of access tokens", + "accessTokenInactivityTimeoutSeconds": "accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect.", + "accessTokenInactivityTimeout": "accessTokenInactivityTimeout defines the token inactivity timeout for tokens granted by any client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Takes valid time duration string such as \"5m\", \"1.5h\" or \"2h45m\". The minimum allowed value for duration is 300s (5 minutes). If the timeout is configured per client, then that value takes precedence. If the timeout value is not specified and the client does not override the value, then tokens remain valid for their full lifetime.\n\nWARNING: existing tokens' timeout will not be affected (lowered) by changing this value", +} + +func (TokenConfig) SwaggerDoc() map[string]string { + return map_TokenConfig +} + +var map_HubSource = map[string]string{ + "": "HubSource is used to specify the hub source and its configuration", + "name": "name is the name of one of the default hub sources", + "disabled": "disabled is used to disable a default hub source on the cluster", +} + +func (HubSource) SwaggerDoc() map[string]string { + return map_HubSource +} + +var map_HubSourceStatus = map[string]string{ + "": "HubSourceStatus is used to reflect the current state of applying the configuration to a default source", + "status": "status indicates success or failure in applying the configuration", + "message": "message provides more information regarding failures", +} + +func (HubSourceStatus) SwaggerDoc() map[string]string { + return map_HubSourceStatus +} + +var map_OperatorHub = map[string]string{ + "": "OperatorHub is the Schema for the operatorhubs API.
It can be used to change the state of the default hub sources for OperatorHub on the cluster from enabled to disabled and vice versa.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (OperatorHub) SwaggerDoc() map[string]string { + return map_OperatorHub +} + +var map_OperatorHubList = map[string]string{ + "": "OperatorHubList contains a list of OperatorHub\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (OperatorHubList) SwaggerDoc() map[string]string { + return map_OperatorHubList +} + +var map_OperatorHubSpec = map[string]string{ + "": "OperatorHubSpec defines the desired state of OperatorHub", + "disableAllDefaultSources": "disableAllDefaultSources allows you to disable all the default hub sources. If this is true, a specific entry in sources can be used to enable a default source. If this is false, a specific entry in sources can be used to disable or enable a default source.", + "sources": "sources is the list of default hub sources and their configuration. If the list is empty, it implies that the default hub sources are enabled on the cluster unless disableAllDefaultSources is true. If disableAllDefaultSources is true and sources is not empty, the configuration present in sources will take precedence. The list of default hub sources and their current state will always be reflected in the status block.", +} + +func (OperatorHubSpec) SwaggerDoc() map[string]string { + return map_OperatorHubSpec +} + +var map_OperatorHubStatus = map[string]string{ + "": "OperatorHubStatus defines the observed state of OperatorHub. The current state of the default hub sources will always be reflected here.", + "sources": "sources encapsulates the result of applying the configuration for each hub source", +} + +func (OperatorHubStatus) SwaggerDoc() map[string]string { + return map_OperatorHubStatus +} + +var map_Project = map[string]string{ + "": "Project holds cluster-wide information about Project. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Project) SwaggerDoc() map[string]string { + return map_Project +} + +var map_ProjectList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (ProjectList) SwaggerDoc() map[string]string { + return map_ProjectList +} + +var map_ProjectSpec = map[string]string{ + "": "ProjectSpec holds the project creation configuration.", + "projectRequestMessage": "projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint", + "projectRequestTemplate": "projectRequestTemplate is the template to use for creating projects in response to projectrequest. This must point to a template in 'openshift-config' namespace. It is optional. If it is not specified, a default template is used.", +} + +func (ProjectSpec) SwaggerDoc() map[string]string { + return map_ProjectSpec +} + +var map_TemplateReference = map[string]string{ + "": "TemplateReference references a template in a specific namespace. 
The namespace must be specified at the point of use.", + "name": "name is the metadata.name of the referenced project request template", +} + +func (TemplateReference) SwaggerDoc() map[string]string { + return map_TemplateReference +} + +var map_Proxy = map[string]string{ + "": "Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "Spec holds user-settable values for the proxy configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Proxy) SwaggerDoc() map[string]string { + return map_Proxy +} + +var map_ProxyList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (ProxyList) SwaggerDoc() map[string]string { + return map_ProxyList +} + +var map_ProxySpec = map[string]string{ + "": "ProxySpec contains cluster proxy creation configuration.", + "httpProxy": "httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var.", + "httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var.", + "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. Empty means unset and will not result in an env var.", + "readinessEndpoints": "readinessEndpoints is a list of endpoints used to verify readiness of the proxy.", + "trustedCA": "trustedCA is a reference to a ConfigMap containing a CA certificate bundle. The trustedCA field should only be consumed by a proxy validator. The validator is responsible for reading the certificate bundle from the required key \"ca-bundle.crt\", merging it with the system default trust bundle, and writing the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. Clients that expect to make proxy connections must use the trusted-ca-bundle for all HTTPS requests to the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as well.\n\nThe namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml):\n\napiVersion: v1 kind: ConfigMap metadata:\n name: user-ca-bundle\n namespace: openshift-config\n data:\n ca-bundle.crt: |", +} + +func (ProxySpec) SwaggerDoc() map[string]string { + return map_ProxySpec +} + +var map_ProxyStatus = map[string]string{ + "": "ProxyStatus shows current known state of the cluster proxy.", + "httpProxy": "httpProxy is the URL of the proxy for HTTP requests.", + "httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests.", + "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used.", +} + +func (ProxyStatus) SwaggerDoc() map[string]string { + return map_ProxyStatus +} + +var map_Scheduler = map[string]string{ + "": "Scheduler holds cluster-wide config information to run the Kubernetes Scheduler and influence its placement decisions. The canonical name for this config is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. 
They may not be overridden.", +} + +func (Scheduler) SwaggerDoc() map[string]string { + return map_Scheduler +} + +var map_SchedulerList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (SchedulerList) SwaggerDoc() map[string]string { + return map_SchedulerList +} + +var map_SchedulerSpec = map[string]string{ + "policy": "DEPRECATED: the scheduler Policy API has been deprecated and will be removed in a future release. policy is a reference to a ConfigMap containing scheduler policy which has user specified predicates and priorities. If this ConfigMap is not available, the scheduler will default to use DefaultAlgorithmProvider. The namespace for this configmap is openshift-config.", + "profile": "profile sets which scheduling profile should be set in order to configure scheduling decisions for new pods.\n\nValid values are \"LowNodeUtilization\", \"HighNodeUtilization\", \"NoScoring\" Defaults to \"LowNodeUtilization\"", + "defaultNodeSelector": "defaultNodeSelector helps set the cluster-wide default node selector to restrict pod placement to specific nodes. This is applied to the pods created in all namespaces and creates an intersection with any existing nodeSelectors already set on a pod, additionally constraining that pod's selector. For example, defaultNodeSelector: \"type=user-node,region=east\" would set nodeSelector field in pod spec to \"type=user-node,region=east\" to all pods created in all namespaces. Namespaces having project-wide node selectors won't be impacted even if this field is set. This adds an annotation section to the namespace. For example, if a new namespace is created with node-selector='type=user-node,region=east', the annotation openshift.io/node-selector: type=user-node,region=east gets added to the project. When the openshift.io/node-selector annotation is set on the project the value is used in preference to the value we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector: \"type=user-node,region=west\" means that the default of \"type=user-node,region=east\" set in defaultNodeSelector would not be applied.", + "mastersSchedulable": "MastersSchedulable allows master nodes to be schedulable. When this flag is turned on, all the master nodes in the cluster will be made schedulable, so that workload pods can run on them. The default value for this field is false, meaning none of the master nodes are schedulable. Important Note: Once the workload pods start running on the master nodes, extreme care must be taken to ensure that cluster-critical control plane components are not impacted. Please turn on this field after doing due diligence.", +} + +func (SchedulerSpec) SwaggerDoc() map[string]string { + return map_SchedulerSpec +}
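+
+// Example (illustrative; the selector value is taken from the
+// defaultNodeSelector documentation above): a Scheduler spec; per the
+// Scheduler type description, the canonical resource name is `cluster`:
+//
+//	apiVersion: config.openshift.io/v1
+//	kind: Scheduler
+//	metadata:
+//	  name: cluster
+//	spec:
+//	  profile: LowNodeUtilization
+//	  defaultNodeSelector: type=user-node,region=east
+//	  mastersSchedulable: false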
Be extremely careful using a custom TLS profile as invalid configurations can be catastrophic.", +} + +func (CustomTLSProfile) SwaggerDoc() map[string]string { + return map_CustomTLSProfile +} + +var map_IntermediateTLSProfile = map[string]string{ + "": "IntermediateTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29", +} + +func (IntermediateTLSProfile) SwaggerDoc() map[string]string { + return map_IntermediateTLSProfile +} + +var map_ModernTLSProfile = map[string]string{ + "": "ModernTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility", +} + +func (ModernTLSProfile) SwaggerDoc() map[string]string { + return map_ModernTLSProfile +} + +var map_OldTLSProfile = map[string]string{ + "": "OldTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility", +} + +func (OldTLSProfile) SwaggerDoc() map[string]string { + return map_OldTLSProfile +} + +var map_TLSProfileSpec = map[string]string{ + "": "TLSProfileSpec is the desired behavior of a TLSSecurityProfile.", + "ciphers": "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml):\n\n ciphers:\n - DES-CBC3-SHA", + "minTLSVersion": "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml):\n\n minTLSVersion: TLSv1.1\n\nNOTE: currently the highest minTLSVersion allowed is VersionTLS12", +} + +func (TLSProfileSpec) SwaggerDoc() map[string]string { + return map_TLSProfileSpec +} + +var map_TLSSecurityProfile = map[string]string{ + "": "TLSSecurityProfile defines the schema for a TLS security profile. This object is used by operators to apply TLS security settings to operands.", + "type": "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. Old, Intermediate and Modern are TLS security profiles based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations\n\nThe profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. 
Depending on precisely which ciphers are available to a process, the list may be reduced.\n\nNote that the Modern profile is currently not supported because it is not yet well adopted by common software libraries.", + "old": "old is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384\n - DHE-RSA-CHACHA20-POLY1305\n - ECDHE-ECDSA-AES128-SHA256\n - ECDHE-RSA-AES128-SHA256\n - ECDHE-ECDSA-AES128-SHA\n - ECDHE-RSA-AES128-SHA\n - ECDHE-ECDSA-AES256-SHA384\n - ECDHE-RSA-AES256-SHA384\n - ECDHE-ECDSA-AES256-SHA\n - ECDHE-RSA-AES256-SHA\n - DHE-RSA-AES128-SHA256\n - DHE-RSA-AES256-SHA256\n - AES128-GCM-SHA256\n - AES256-GCM-SHA384\n - AES128-SHA256\n - AES256-SHA256\n - AES128-SHA\n - AES256-SHA\n - DES-CBC3-SHA\n minTLSVersion: TLSv1.0", + "intermediate": "intermediate is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - DHE-RSA-AES128-GCM-SHA256\n - DHE-RSA-AES256-GCM-SHA384\n minTLSVersion: TLSv1.2", + "modern": "modern is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n minTLSVersion: TLSv1.3\n\nNOTE: Currently unsupported.", + "custom": "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this:\n\n ciphers:\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n minTLSVersion: TLSv1.1", +} + +func (TLSSecurityProfile) SwaggerDoc() map[string]string { + return map_TLSSecurityProfile +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/custom-resource-status/LICENSE b/vendor/github.com/openshift/custom-resource-status/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/openshift/custom-resource-status/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go b/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go new file mode 100644 index 000000000..bbeee804a --- /dev/null +++ b/vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go @@ -0,0 +1,104 @@ +package v1 + +import ( + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// SetStatusCondition sets the corresponding condition in conditions to newCondition. +func SetStatusCondition(conditions *[]Condition, newCondition Condition) { + if conditions == nil { + conditions = &[]Condition{} + } + existingCondition := FindStatusCondition(*conditions, newCondition.Type) + if existingCondition == nil { + newCondition.LastTransitionTime = metav1.NewTime(time.Now()) + newCondition.LastHeartbeatTime = metav1.NewTime(time.Now()) + *conditions = append(*conditions, newCondition) + return + } + + if existingCondition.Status != newCondition.Status { + existingCondition.Status = newCondition.Status + existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) + } + + existingCondition.Reason = newCondition.Reason + existingCondition.Message = newCondition.Message + existingCondition.LastHeartbeatTime = metav1.NewTime(time.Now()) +} + +// SetStatusConditionNoHearbeat sets the corresponding condition in conditions to newCondition +// without setting lastHeartbeatTime. +func SetStatusConditionNoHeartbeat(conditions *[]Condition, newCondition Condition) { + if conditions == nil { + conditions = &[]Condition{} + } + existingCondition := FindStatusCondition(*conditions, newCondition.Type) + if existingCondition == nil { + newCondition.LastTransitionTime = metav1.NewTime(time.Now()) + *conditions = append(*conditions, newCondition) + return + } + + if existingCondition.Status != newCondition.Status { + existingCondition.Status = newCondition.Status + existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) + } + + existingCondition.Reason = newCondition.Reason + existingCondition.Message = newCondition.Message +} + +// RemoveStatusCondition removes the corresponding conditionType from conditions. +func RemoveStatusCondition(conditions *[]Condition, conditionType ConditionType) { + if conditions == nil { + return + } + newConditions := []Condition{} + for _, condition := range *conditions { + if condition.Type != conditionType { + newConditions = append(newConditions, condition) + } + } + + *conditions = newConditions +} + +// FindStatusCondition finds the conditionType in conditions. 
+func FindStatusCondition(conditions []Condition, conditionType ConditionType) *Condition { + for i := range conditions { + if conditions[i].Type == conditionType { + return &conditions[i] + } + } + + return nil +} + +// IsStatusConditionTrue returns true when the conditionType is present and set to `corev1.ConditionTrue` +func IsStatusConditionTrue(conditions []Condition, conditionType ConditionType) bool { + return IsStatusConditionPresentAndEqual(conditions, conditionType, corev1.ConditionTrue) +} + +// IsStatusConditionFalse returns true when the conditionType is present and set to `corev1.ConditionFalse` +func IsStatusConditionFalse(conditions []Condition, conditionType ConditionType) bool { + return IsStatusConditionPresentAndEqual(conditions, conditionType, corev1.ConditionFalse) +} + +// IsStatusConditionUnknown returns true when the conditionType is present and set to `corev1.ConditionUnknown` +func IsStatusConditionUnknown(conditions []Condition, conditionType ConditionType) bool { + return IsStatusConditionPresentAndEqual(conditions, conditionType, corev1.ConditionUnknown) +} + +// IsStatusConditionPresentAndEqual returns true when conditionType is present and equal to status. +func IsStatusConditionPresentAndEqual(conditions []Condition, conditionType ConditionType, status corev1.ConditionStatus) bool { + for _, condition := range conditions { + if condition.Type == conditionType { + return condition.Status == status + } + } + return false +} diff --git a/vendor/github.com/openshift/custom-resource-status/conditions/v1/doc.go b/vendor/github.com/openshift/custom-resource-status/conditions/v1/doc.go new file mode 100644 index 000000000..b657efeaa --- /dev/null +++ b/vendor/github.com/openshift/custom-resource-status/conditions/v1/doc.go @@ -0,0 +1,9 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// Package v1 provides version v1 of the types and functions necessary to +// manage and inspect a slice of conditions. It is opinionated in the +// condition types provided but leaves it to the user to define additional +// types as necessary. +package v1 diff --git a/vendor/github.com/openshift/custom-resource-status/conditions/v1/types.go b/vendor/github.com/openshift/custom-resource-status/conditions/v1/types.go new file mode 100644 index 000000000..950678fb9 --- /dev/null +++ b/vendor/github.com/openshift/custom-resource-status/conditions/v1/types.go @@ -0,0 +1,51 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Condition represents the state of the operator's +// reconciliation functionality. +// +k8s:deepcopy-gen=true +type Condition struct { + Type ConditionType `json:"type" description:"type of condition ie. 
Available|Progressing|Degraded."` + + Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"` + + // +optional + Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"` + + // +optional + Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"` + + // +optional + LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime" description:"last time we got an update on a given condition"` + + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime" description:"last time the condition transit from one status to another"` +} + +// ConditionType is the state of the operator's reconciliation functionality. +type ConditionType string + +const ( + // ConditionAvailable indicates that the resources maintained by the operator, + // is functional and available in the cluster. + ConditionAvailable ConditionType = "Available" + + // ConditionProgressing indicates that the operator is actively making changes to the resources maintained by the + // operator + ConditionProgressing ConditionType = "Progressing" + + // ConditionDegraded indicates that the resources maintained by the operator are not functioning completely. + // An example of a degraded state would be if not all pods in a deployment were running. + // It may still be available, but it is degraded + ConditionDegraded ConditionType = "Degraded" + + // ConditionUpgradeable indicates whether the resources maintained by the operator are in a state that is safe to upgrade. + // When `False`, the resources maintained by the operator should not be upgraded and the + // message field should contain a human readable description of what the administrator should do to + // allow the operator to successfully update the resources maintained by the operator. + ConditionUpgradeable ConditionType = "Upgradeable" +) diff --git a/vendor/github.com/openshift/custom-resource-status/conditions/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/custom-resource-status/conditions/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..bbbbf863d --- /dev/null +++ b/vendor/github.com/openshift/custom-resource-status/conditions/v1/zz_generated.deepcopy.go @@ -0,0 +1,23 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. +func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/pborman/uuid/.travis.yml b/vendor/github.com/pborman/uuid/.travis.yml new file mode 100644 index 000000000..3deb4a124 --- /dev/null +++ b/vendor/github.com/pborman/uuid/.travis.yml @@ -0,0 +1,10 @@ +language: go + +go: + - "1.9" + - "1.10" + - "1.11" + - tip + +script: + - go test -v ./... 
diff --git a/vendor/github.com/pborman/uuid/CONTRIBUTING.md b/vendor/github.com/pborman/uuid/CONTRIBUTING.md new file mode 100644 index 000000000..04fdf09f1 --- /dev/null +++ b/vendor/github.com/pborman/uuid/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. diff --git a/vendor/github.com/pborman/uuid/CONTRIBUTORS b/vendor/github.com/pborman/uuid/CONTRIBUTORS new file mode 100644 index 000000000..b382a04ed --- /dev/null +++ b/vendor/github.com/pborman/uuid/CONTRIBUTORS @@ -0,0 +1 @@ +Paul Borman diff --git a/vendor/github.com/pborman/uuid/LICENSE b/vendor/github.com/pborman/uuid/LICENSE new file mode 100644 index 000000000..5dc68268d --- /dev/null +++ b/vendor/github.com/pborman/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pborman/uuid/README.md b/vendor/github.com/pborman/uuid/README.md new file mode 100644 index 000000000..810ad40dc --- /dev/null +++ b/vendor/github.com/pborman/uuid/README.md @@ -0,0 +1,15 @@ +This project was automatically exported from code.google.com/p/go-uuid + +# uuid ![build status](https://travis-ci.org/pborman/uuid.svg?branch=master) +The uuid package generates and inspects UUIDs based on [RFC 4122](http://tools.ietf.org/html/rfc4122) and DCE 1.1: Authentication and Security Services. + +This package now leverages the github.com/google/uuid package (which is based off an earlier version of this package). 
+ +###### Install +`go get github.com/pborman/uuid` + +###### Documentation +[![GoDoc](https://godoc.org/github.com/pborman/uuid?status.svg)](http://godoc.org/github.com/pborman/uuid) + +Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: +http://godoc.org/github.com/pborman/uuid diff --git a/vendor/github.com/pborman/uuid/dce.go b/vendor/github.com/pborman/uuid/dce.go new file mode 100644 index 000000000..50a0f2d09 --- /dev/null +++ b/vendor/github.com/pborman/uuid/dce.go @@ -0,0 +1,84 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) UUID { + uuid := NewUUID() + if uuid != nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCEPerson(Person, uint32(os.Getuid())) +func NewDCEPerson() UUID { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCEGroup(Group, uint32(os.Getgid())) +func NewDCEGroup() UUID { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID or false. +func (uuid UUID) Domain() (Domain, bool) { + if v, _ := uuid.Version(); v != 2 { + return 0, false + } + return Domain(uuid[9]), true +} + +// Id returns the id for a Version 2 UUID or false. +func (uuid UUID) Id() (uint32, bool) { + if v, _ := uuid.Version(); v != 2 { + return 0, false + } + return binary.BigEndian.Uint32(uuid[0:4]), true +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/pborman/uuid/doc.go b/vendor/github.com/pborman/uuid/doc.go new file mode 100644 index 000000000..727d76167 --- /dev/null +++ b/vendor/github.com/pborman/uuid/doc.go @@ -0,0 +1,13 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The uuid package generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// This package is a partial wrapper around the github.com/google/uuid package. +// This package represents a UUID as []byte while github.com/google/uuid +// represents a UUID as [16]byte. 
+package uuid diff --git a/vendor/github.com/pborman/uuid/hash.go b/vendor/github.com/pborman/uuid/hash.go new file mode 100644 index 000000000..a0420c1ef --- /dev/null +++ b/vendor/github.com/pborman/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known Name Space IDs and UUIDs +var ( + NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8") + NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8") + NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8") + NIL = Parse("00000000-0000-0000-0000-000000000000") +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space) + h.Write([]byte(data)) + s := h.Sum(nil) + uuid := make([]byte, 16) + copy(uuid, s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/pborman/uuid/marshal.go b/vendor/github.com/pborman/uuid/marshal.go new file mode 100644 index 000000000..35b89352a --- /dev/null +++ b/vendor/github.com/pborman/uuid/marshal.go @@ -0,0 +1,85 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "errors" + "fmt" + + guuid "github.com/google/uuid" +) + +// MarshalText implements encoding.TextMarshaler. +func (u UUID) MarshalText() ([]byte, error) { + if len(u) != 16 { + return nil, nil + } + var js [36]byte + encodeHex(js[:], u) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (u *UUID) UnmarshalText(data []byte) error { + if len(data) == 0 { + return nil + } + id := Parse(string(data)) + if id == nil { + return errors.New("invalid UUID") + } + *u = id + return nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (u UUID) MarshalBinary() ([]byte, error) { + return u[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (u *UUID) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return nil + } + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + var id [16]byte + copy(id[:], data) + *u = id[:] + return nil +} + +// MarshalText implements encoding.TextMarshaler. +func (u Array) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], u[:]) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. 
+func (u *Array) UnmarshalText(data []byte) error { + id, err := guuid.ParseBytes(data) + if err != nil { + return err + } + *u = Array(id) + return nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (u Array) MarshalBinary() ([]byte, error) { + return u[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (u *Array) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(u[:], data) + return nil +} diff --git a/vendor/github.com/pborman/uuid/node.go b/vendor/github.com/pborman/uuid/node.go new file mode 100644 index 000000000..e524e0101 --- /dev/null +++ b/vendor/github.com/pborman/uuid/node.go @@ -0,0 +1,50 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + guuid "github.com/google/uuid" +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + return guuid.NodeInterface() +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + return guuid.SetNodeInterface(name) +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + return guuid.NodeID() +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + return guuid.SetNodeID(id) +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + if len(uuid) != 16 { + return nil + } + node := make([]byte, 6) + copy(node, uuid[10:]) + return node +} diff --git a/vendor/github.com/pborman/uuid/sql.go b/vendor/github.com/pborman/uuid/sql.go new file mode 100644 index 000000000..929c3847e --- /dev/null +++ b/vendor/github.com/pborman/uuid/sql.go @@ -0,0 +1,68 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "errors" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. 
+func (uuid *UUID) Scan(src interface{}) error { + switch src.(type) { + case string: + // if an empty UUID comes from a table, we return a null UUID + if src.(string) == "" { + return nil + } + + // see uuid.Parse for required string format + parsed := Parse(src.(string)) + + if parsed == nil { + return errors.New("Scan: invalid UUID format") + } + + *uuid = parsed + case []byte: + b := src.([]byte) + + // if an empty UUID comes from a table, we return a null UUID + if len(b) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(b) == 16 { + parsed := make([]byte, 16) + copy(parsed, b) + *uuid = UUID(parsed) + } else { + u := Parse(string(b)) + + if u == nil { + return errors.New("Scan: invalid UUID format") + } + + *uuid = u + } + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/pborman/uuid/time.go b/vendor/github.com/pborman/uuid/time.go new file mode 100644 index 000000000..5c0960d87 --- /dev/null +++ b/vendor/github.com/pborman/uuid/time.go @@ -0,0 +1,57 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + + guuid "github.com/google/uuid" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time = guuid.Time + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { return guuid.GetTime() } + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence a new random +// clock sequence is generated the first time a clock sequence is requested by +// ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) sequence is generated +// for +func ClockSequence() int { return guuid.ClockSequence() } + +// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { guuid.SetClockSequence(seq) } + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. It returns false if uuid is not valid. The time is only well defined +// for version 1 and 2 UUIDs. +func (uuid UUID) Time() (Time, bool) { + if len(uuid) != 16 { + return 0, false + } + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time), true +} + +// ClockSequence returns the clock sequence encoded in uuid. It returns false +// if uuid is not valid. The clock sequence is only well defined for version 1 +// and 2 UUIDs. 
+func (uuid UUID) ClockSequence() (int, bool) { + if len(uuid) != 16 { + return 0, false + } + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true +} diff --git a/vendor/github.com/pborman/uuid/util.go b/vendor/github.com/pborman/uuid/util.go new file mode 100644 index 000000000..255b5e248 --- /dev/null +++ b/vendor/github.com/pborman/uuid/util.go @@ -0,0 +1,32 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts the the first two hex bytes of x into a byte. +func xtob(x string) (byte, bool) { + b1 := xvalues[x[0]] + b2 := xvalues[x[1]] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/pborman/uuid/uuid.go b/vendor/github.com/pborman/uuid/uuid.go new file mode 100644 index 000000000..337000420 --- /dev/null +++ b/vendor/github.com/pborman/uuid/uuid.go @@ -0,0 +1,162 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "io" + + guuid "github.com/google/uuid" +) + +// Array is a pass-by-value UUID that can be used as an effecient key in a map. +type Array [16]byte + +// UUID converts uuid into a slice. +func (uuid Array) UUID() UUID { + return uuid[:] +} + +// String returns the string representation of uuid, +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. +func (uuid Array) String() string { + return guuid.UUID(uuid).String() +} + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID []byte + +// A Version represents a UUIDs version. +type Version = guuid.Version + +// A Variant represents a UUIDs variant. +type Variant = guuid.Variant + +// Constants returned by Variant. +const ( + Invalid = guuid.Invalid // Invalid UUID + RFC4122 = guuid.RFC4122 // The variant specified in RFC4122 + Reserved = guuid.Reserved // Reserved, NCS backward compatibility. 
+ Microsoft = guuid.Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future = guuid.Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// New returns a new random (version 4) UUID as a string. It is a convenience +// function for NewRandom().String(). +func New() string { + return NewRandom().String() +} + +// Parse decodes s into a UUID or returns nil. See github.com/google/uuid for +// the formats parsed. +func Parse(s string) UUID { + gu, err := guuid.Parse(s) + if err == nil { + return gu[:] + } + return nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. +func ParseBytes(b []byte) (UUID, error) { + gu, err := guuid.ParseBytes(b) + if err == nil { + return gu[:], nil + } + return nil, err +} + +// Equal returns true if uuid1 and uuid2 are equal. +func Equal(uuid1, uuid2 UUID) bool { + return bytes.Equal(uuid1, uuid2) +} + +// Array returns an array representation of uuid that can be used as a map key. +// Array panics if uuid is not valid. +func (uuid UUID) Array() Array { + if len(uuid) != 16 { + panic("invalid uuid") + } + var a Array + copy(a[:], uuid) + return a +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + if len(uuid) != 16 { + return "" + } + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + if len(uuid) != 16 { + return "" + } + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst[:], uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. It returns Invalid if +// uuid is invalid. +func (uuid UUID) Variant() Variant { + if len(uuid) != 16 { + return Invalid + } + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. It returns false if uuid is not +// valid. +func (uuid UUID) Version() (Version, bool) { + if len(uuid) != 16 { + return 0, false + } + return Version(uuid[6] >> 4), true +} + +// SetRand sets the random number generator to r, which implements io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. +func SetRand(r io.Reader) { + guuid.SetRand(r) +} diff --git a/vendor/github.com/pborman/uuid/version1.go b/vendor/github.com/pborman/uuid/version1.go new file mode 100644 index 000000000..7af948da7 --- /dev/null +++ b/vendor/github.com/pborman/uuid/version1.go @@ -0,0 +1,23 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + guuid "github.com/google/uuid" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. 
If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewUUID returns nil. +func NewUUID() UUID { + gu, err := guuid.NewUUID() + if err == nil { + return UUID(gu[:]) + } + return nil +} diff --git a/vendor/github.com/pborman/uuid/version4.go b/vendor/github.com/pborman/uuid/version4.go new file mode 100644 index 000000000..b459d46d1 --- /dev/null +++ b/vendor/github.com/pborman/uuid/version4.go @@ -0,0 +1,26 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import guuid "github.com/google/uuid" + +// Random returns a Random (Version 4) UUID or panics. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// A note about uniqueness derived from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. +func NewRandom() UUID { + if gu, err := guuid.NewRandom(); err == nil { + return UUID(gu[:]) + } + return nil +} diff --git a/vendor/kubevirt.io/api/LICENSE b/vendor/kubevirt.io/api/LICENSE new file mode 100644 index 000000000..549d874d4 --- /dev/null +++ b/vendor/kubevirt.io/api/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2017 The KubeVirt Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/kubevirt.io/api/core/register.go b/vendor/kubevirt.io/api/core/register.go new file mode 100644 index 000000000..22080c717 --- /dev/null +++ b/vendor/kubevirt.io/api/core/register.go @@ -0,0 +1,4 @@ +package core + +// GroupName is the group name use in this package +const GroupName = "kubevirt.io" diff --git a/vendor/kubevirt.io/api/core/v1/componentconfig.go b/vendor/kubevirt.io/api/core/v1/componentconfig.go new file mode 100644 index 000000000..4d26dbd2f --- /dev/null +++ b/vendor/kubevirt.io/api/core/v1/componentconfig.go @@ -0,0 +1,48 @@ +package v1 + +// This code is copied from +// https://github.com/kubevirt/controller-lifecycle-operator-sdk/blob/master/pkg/sdk/api/types.go +// in order to avoid dependency loops + +import ( + corev1 "k8s.io/api/core/v1" +) + +// NodePlacement describes node scheduling configuration. 
+type NodePlacement struct { + // nodeSelector is the node selector applied to the relevant kind of pods + // It specifies a map of key-value pairs: for the pod to be eligible to run on a node, + // the node must have each of the indicated key-value pairs as labels + // (it can have additional labels as well). + // See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + // +kubebuilder:validation:Optional + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // affinity enables pod affinity/anti-affinity placement expanding the types of constraints + // that can be expressed with nodeSelector. + // affinity is going to be applied to the relevant kind of pods in parallel with nodeSelector + // See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity + // +kubebuilder:validation:Optional + // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty"` + + // tolerations is a list of tolerations applied to the relevant kind of pods + // See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ for more info. + // These are additional tolerations other than default ones. + // +kubebuilder:validation:Optional + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` +} + +type ComponentConfig struct { + // nodePlacement describes scheduling configuration for specific + // KubeVirt components + //+optional + NodePlacement *NodePlacement `json:"nodePlacement,omitempty"` + // replicas indicates how many replicas should be created for each KubeVirt infrastructure + // component (like virt-api or virt-controller). Defaults to 2. + // WARNING: this is an advanced feature that prevents auto-scaling for core kubevirt components. Please use with caution! + //+optional + Replicas *uint8 `json:"replicas,omitempty"` +} diff --git a/vendor/kubevirt.io/api/core/v1/deepcopy_generated.go b/vendor/kubevirt.io/api/core/v1/deepcopy_generated.go new file mode 100644 index 000000000..fedd73348 --- /dev/null +++ b/vendor/kubevirt.io/api/core/v1/deepcopy_generated.go @@ -0,0 +1,5304 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2023 The KubeVirt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + v1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
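For orientation before the long run of generated helpers below: a minimal, hypothetical sketch of how a consumer could populate the ComponentConfig and NodePlacement types defined in componentconfig.go above. The struct and field names come from this patch; the selector label and replica count are illustrative only.

func exampleComponentConfig() ComponentConfig {
	replicas := uint8(2) // the documented default for infrastructure components
	return ComponentConfig{
		NodePlacement: &NodePlacement{
			// Illustrative selector: schedule the component pods onto Linux nodes.
			NodeSelector: map[string]string{"kubernetes.io/os": "linux"},
		},
		Replicas: &replicas,
	}
}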
+func (in *AccessCredential) DeepCopyInto(out *AccessCredential) { + *out = *in + if in.SSHPublicKey != nil { + in, out := &in.SSHPublicKey, &out.SSHPublicKey + *out = new(SSHPublicKeyAccessCredential) + (*in).DeepCopyInto(*out) + } + if in.UserPassword != nil { + in, out := &in.UserPassword, &out.UserPassword + *out = new(UserPasswordAccessCredential) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessCredential. +func (in *AccessCredential) DeepCopy() *AccessCredential { + if in == nil { + return nil + } + out := new(AccessCredential) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessCredentialSecretSource) DeepCopyInto(out *AccessCredentialSecretSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessCredentialSecretSource. +func (in *AccessCredentialSecretSource) DeepCopy() *AccessCredentialSecretSource { + if in == nil { + return nil + } + out := new(AccessCredentialSecretSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddVolumeOptions) DeepCopyInto(out *AddVolumeOptions) { + *out = *in + if in.Disk != nil { + in, out := &in.Disk, &out.Disk + *out = new(Disk) + (*in).DeepCopyInto(*out) + } + if in.VolumeSource != nil { + in, out := &in.VolumeSource, &out.VolumeSource + *out = new(HotplugVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddVolumeOptions. +func (in *AddVolumeOptions) DeepCopy() *AddVolumeOptions { + if in == nil { + return nil + } + out := new(AddVolumeOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthorizedKeysFile) DeepCopyInto(out *AuthorizedKeysFile) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthorizedKeysFile. +func (in *AuthorizedKeysFile) DeepCopy() *AuthorizedKeysFile { + if in == nil { + return nil + } + out := new(AuthorizedKeysFile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BIOS) DeepCopyInto(out *BIOS) { + *out = *in + if in.UseSerial != nil { + in, out := &in.UseSerial, &out.UseSerial + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BIOS. +func (in *BIOS) DeepCopy() *BIOS { + if in == nil { + return nil + } + out := new(BIOS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BlockSize) DeepCopyInto(out *BlockSize) { + *out = *in + if in.Custom != nil { + in, out := &in.Custom, &out.Custom + *out = new(CustomBlockSize) + **out = **in + } + if in.MatchVolume != nil { + in, out := &in.MatchVolume, &out.MatchVolume + *out = new(FeatureState) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockSize. +func (in *BlockSize) DeepCopy() *BlockSize { + if in == nil { + return nil + } + out := new(BlockSize) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Bootloader) DeepCopyInto(out *Bootloader) { + *out = *in + if in.BIOS != nil { + in, out := &in.BIOS, &out.BIOS + *out = new(BIOS) + (*in).DeepCopyInto(*out) + } + if in.EFI != nil { + in, out := &in.EFI, &out.EFI + *out = new(EFI) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bootloader. +func (in *Bootloader) DeepCopy() *Bootloader { + if in == nil { + return nil + } + out := new(Bootloader) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDRomTarget) DeepCopyInto(out *CDRomTarget) { + *out = *in + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDRomTarget. +func (in *CDRomTarget) DeepCopy() *CDRomTarget { + if in == nil { + return nil + } + out := new(CDRomTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CPU) DeepCopyInto(out *CPU) { + *out = *in + if in.Features != nil { + in, out := &in.Features, &out.Features + *out = make([]CPUFeature, len(*in)) + copy(*out, *in) + } + if in.NUMA != nil { + in, out := &in.NUMA, &out.NUMA + *out = new(NUMA) + (*in).DeepCopyInto(*out) + } + if in.Realtime != nil { + in, out := &in.Realtime, &out.Realtime + *out = new(Realtime) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPU. +func (in *CPU) DeepCopy() *CPU { + if in == nil { + return nil + } + out := new(CPU) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CPUFeature) DeepCopyInto(out *CPUFeature) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPUFeature. +func (in *CPUFeature) DeepCopy() *CPUFeature { + if in == nil { + return nil + } + out := new(CPUFeature) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertConfig) DeepCopyInto(out *CertConfig) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(metav1.Duration) + **out = **in + } + if in.RenewBefore != nil { + in, out := &in.RenewBefore, &out.RenewBefore + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertConfig. 
+func (in *CertConfig) DeepCopy() *CertConfig { + if in == nil { + return nil + } + out := new(CertConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Chassis) DeepCopyInto(out *Chassis) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Chassis. +func (in *Chassis) DeepCopy() *Chassis { + if in == nil { + return nil + } + out := new(Chassis) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientPassthroughDevices) DeepCopyInto(out *ClientPassthroughDevices) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientPassthroughDevices. +func (in *ClientPassthroughDevices) DeepCopy() *ClientPassthroughDevices { + if in == nil { + return nil + } + out := new(ClientPassthroughDevices) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Clock) DeepCopyInto(out *Clock) { + *out = *in + in.ClockOffset.DeepCopyInto(&out.ClockOffset) + if in.Timer != nil { + in, out := &in.Timer, &out.Timer + *out = new(Timer) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Clock. +func (in *Clock) DeepCopy() *Clock { + if in == nil { + return nil + } + out := new(Clock) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClockOffset) DeepCopyInto(out *ClockOffset) { + *out = *in + if in.UTC != nil { + in, out := &in.UTC, &out.UTC + *out = new(ClockOffsetUTC) + (*in).DeepCopyInto(*out) + } + if in.Timezone != nil { + in, out := &in.Timezone, &out.Timezone + *out = new(ClockOffsetTimezone) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClockOffset. +func (in *ClockOffset) DeepCopy() *ClockOffset { + if in == nil { + return nil + } + out := new(ClockOffset) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClockOffsetUTC) DeepCopyInto(out *ClockOffsetUTC) { + *out = *in + if in.OffsetSeconds != nil { + in, out := &in.OffsetSeconds, &out.OffsetSeconds + *out = new(int) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClockOffsetUTC. +func (in *ClockOffsetUTC) DeepCopy() *ClockOffsetUTC { + if in == nil { + return nil + } + out := new(ClockOffsetUTC) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudInitConfigDriveSource) DeepCopyInto(out *CloudInitConfigDriveSource) { + *out = *in + if in.UserDataSecretRef != nil { + in, out := &in.UserDataSecretRef, &out.UserDataSecretRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.NetworkDataSecretRef != nil { + in, out := &in.NetworkDataSecretRef, &out.NetworkDataSecretRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudInitConfigDriveSource. +func (in *CloudInitConfigDriveSource) DeepCopy() *CloudInitConfigDriveSource { + if in == nil { + return nil + } + out := new(CloudInitConfigDriveSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudInitNoCloudSource) DeepCopyInto(out *CloudInitNoCloudSource) { + *out = *in + if in.UserDataSecretRef != nil { + in, out := &in.UserDataSecretRef, &out.UserDataSecretRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.NetworkDataSecretRef != nil { + in, out := &in.NetworkDataSecretRef, &out.NetworkDataSecretRef + *out = new(corev1.LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudInitNoCloudSource. +func (in *CloudInitNoCloudSource) DeepCopy() *CloudInitNoCloudSource { + if in == nil { + return nil + } + out := new(CloudInitNoCloudSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterProfilerRequest) DeepCopyInto(out *ClusterProfilerRequest) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterProfilerRequest. +func (in *ClusterProfilerRequest) DeepCopy() *ClusterProfilerRequest { + if in == nil { + return nil + } + out := new(ClusterProfilerRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterProfilerResults) DeepCopyInto(out *ClusterProfilerResults) { + *out = *in + if in.ComponentResults != nil { + in, out := &in.ComponentResults, &out.ComponentResults + *out = make(map[string]ProfilerResult, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterProfilerResults. +func (in *ClusterProfilerResults) DeepCopy() *ClusterProfilerResults { + if in == nil { + return nil + } + out := new(ClusterProfilerResults) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentConfig) DeepCopyInto(out *ComponentConfig) { + *out = *in + if in.NodePlacement != nil { + in, out := &in.NodePlacement, &out.NodePlacement + *out = new(NodePlacement) + (*in).DeepCopyInto(*out) + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(byte) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentConfig. 
+func (in *ComponentConfig) DeepCopy() *ComponentConfig { + if in == nil { + return nil + } + out := new(ComponentConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigDriveSSHPublicKeyAccessCredentialPropagation) DeepCopyInto(out *ConfigDriveSSHPublicKeyAccessCredentialPropagation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigDriveSSHPublicKeyAccessCredentialPropagation. +func (in *ConfigDriveSSHPublicKeyAccessCredentialPropagation) DeepCopy() *ConfigDriveSSHPublicKeyAccessCredentialPropagation { + if in == nil { + return nil + } + out := new(ConfigDriveSSHPublicKeyAccessCredentialPropagation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapVolumeSource) DeepCopyInto(out *ConfigMapVolumeSource) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapVolumeSource. +func (in *ConfigMapVolumeSource) DeepCopy() *ConfigMapVolumeSource { + if in == nil { + return nil + } + out := new(ConfigMapVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerDiskSource) DeepCopyInto(out *ContainerDiskSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerDiskSource. +func (in *ContainerDiskSource) DeepCopy() *ContainerDiskSource { + if in == nil { + return nil + } + out := new(ContainerDiskSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomBlockSize) DeepCopyInto(out *CustomBlockSize) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomBlockSize. +func (in *CustomBlockSize) DeepCopy() *CustomBlockSize { + if in == nil { + return nil + } + out := new(CustomBlockSize) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomProfile) DeepCopyInto(out *CustomProfile) { + *out = *in + if in.LocalhostProfile != nil { + in, out := &in.LocalhostProfile, &out.LocalhostProfile + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomProfile. +func (in *CustomProfile) DeepCopy() *CustomProfile { + if in == nil { + return nil + } + out := new(CustomProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomizeComponents) DeepCopyInto(out *CustomizeComponents) { + *out = *in + if in.Patches != nil { + in, out := &in.Patches, &out.Patches + *out = make([]CustomizeComponentsPatch, len(*in)) + copy(*out, *in) + } + if in.Flags != nil { + in, out := &in.Flags, &out.Flags + *out = new(Flags) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizeComponents. +func (in *CustomizeComponents) DeepCopy() *CustomizeComponents { + if in == nil { + return nil + } + out := new(CustomizeComponents) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomizeComponentsPatch) DeepCopyInto(out *CustomizeComponentsPatch) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomizeComponentsPatch. +func (in *CustomizeComponentsPatch) DeepCopy() *CustomizeComponentsPatch { + if in == nil { + return nil + } + out := new(CustomizeComponentsPatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DHCPOptions) DeepCopyInto(out *DHCPOptions) { + *out = *in + if in.NTPServers != nil { + in, out := &in.NTPServers, &out.NTPServers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PrivateOptions != nil { + in, out := &in.PrivateOptions, &out.PrivateOptions + *out = make([]DHCPPrivateOptions, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DHCPOptions. +func (in *DHCPOptions) DeepCopy() *DHCPOptions { + if in == nil { + return nil + } + out := new(DHCPOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DHCPPrivateOptions) DeepCopyInto(out *DHCPPrivateOptions) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DHCPPrivateOptions. +func (in *DHCPPrivateOptions) DeepCopy() *DHCPPrivateOptions { + if in == nil { + return nil + } + out := new(DHCPPrivateOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataVolumeSource) DeepCopyInto(out *DataVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeSource. +func (in *DataVolumeSource) DeepCopy() *DataVolumeSource { + if in == nil { + return nil + } + out := new(DataVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataVolumeTemplateDummyStatus) DeepCopyInto(out *DataVolumeTemplateDummyStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeTemplateDummyStatus. +func (in *DataVolumeTemplateDummyStatus) DeepCopy() *DataVolumeTemplateDummyStatus { + if in == nil { + return nil + } + out := new(DataVolumeTemplateDummyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataVolumeTemplateSpec) DeepCopyInto(out *DataVolumeTemplateSpec) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(DataVolumeTemplateDummyStatus) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeTemplateSpec. +func (in *DataVolumeTemplateSpec) DeepCopy() *DataVolumeTemplateSpec { + if in == nil { + return nil + } + out := new(DataVolumeTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeveloperConfiguration) DeepCopyInto(out *DeveloperConfiguration) { + *out = *in + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NodeSelectors != nil { + in, out := &in.NodeSelectors, &out.NodeSelectors + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.MinimumClusterTSCFrequency != nil { + in, out := &in.MinimumClusterTSCFrequency, &out.MinimumClusterTSCFrequency + *out = new(int64) + **out = **in + } + if in.DiskVerification != nil { + in, out := &in.DiskVerification, &out.DiskVerification + *out = new(DiskVerification) + (*in).DeepCopyInto(*out) + } + if in.LogVerbosity != nil { + in, out := &in.LogVerbosity, &out.LogVerbosity + *out = new(LogVerbosity) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeveloperConfiguration. +func (in *DeveloperConfiguration) DeepCopy() *DeveloperConfiguration { + if in == nil { + return nil + } + out := new(DeveloperConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Devices) DeepCopyInto(out *Devices) { + *out = *in + if in.UseVirtioTransitional != nil { + in, out := &in.UseVirtioTransitional, &out.UseVirtioTransitional + *out = new(bool) + **out = **in + } + if in.Disks != nil { + in, out := &in.Disks, &out.Disks + *out = make([]Disk, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Watchdog != nil { + in, out := &in.Watchdog, &out.Watchdog + *out = new(Watchdog) + (*in).DeepCopyInto(*out) + } + if in.Interfaces != nil { + in, out := &in.Interfaces, &out.Interfaces + *out = make([]Interface, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Inputs != nil { + in, out := &in.Inputs, &out.Inputs + *out = make([]Input, len(*in)) + copy(*out, *in) + } + if in.AutoattachPodInterface != nil { + in, out := &in.AutoattachPodInterface, &out.AutoattachPodInterface + *out = new(bool) + **out = **in + } + if in.AutoattachGraphicsDevice != nil { + in, out := &in.AutoattachGraphicsDevice, &out.AutoattachGraphicsDevice + *out = new(bool) + **out = **in + } + if in.AutoattachSerialConsole != nil { + in, out := &in.AutoattachSerialConsole, &out.AutoattachSerialConsole + *out = new(bool) + **out = **in + } + if in.AutoattachMemBalloon != nil { + in, out := &in.AutoattachMemBalloon, &out.AutoattachMemBalloon + *out = new(bool) + **out = **in + } + if in.AutoattachInputDevice != nil { + in, out := &in.AutoattachInputDevice, &out.AutoattachInputDevice + *out = new(bool) + **out = **in + } + if in.AutoattachVSOCK != nil { + in, out := &in.AutoattachVSOCK, &out.AutoattachVSOCK + *out = new(bool) + **out = **in + } + if in.Rng != nil { + in, out := &in.Rng, &out.Rng + *out = new(Rng) + **out = **in + } + if in.BlockMultiQueue != nil { + in, out := &in.BlockMultiQueue, &out.BlockMultiQueue + *out = new(bool) + **out = **in + } + if in.NetworkInterfaceMultiQueue != nil { + in, out := &in.NetworkInterfaceMultiQueue, &out.NetworkInterfaceMultiQueue + *out = new(bool) + **out = **in + } + if in.GPUs != nil { + in, out := &in.GPUs, &out.GPUs + *out = make([]GPU, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Filesystems != nil { + in, out := &in.Filesystems, &out.Filesystems + *out = make([]Filesystem, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HostDevices != nil { + in, out := &in.HostDevices, &out.HostDevices + *out = make([]HostDevice, len(*in)) + copy(*out, *in) + } + if in.ClientPassthrough != nil { + in, out := &in.ClientPassthrough, &out.ClientPassthrough + *out = new(ClientPassthroughDevices) + **out = **in + } + if in.Sound != nil { + in, out := &in.Sound, &out.Sound + *out = new(SoundDevice) + **out = **in + } + if in.TPM != nil { + in, out := &in.TPM, &out.TPM + *out = new(TPMDevice) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Devices. +func (in *Devices) DeepCopy() *Devices { + if in == nil { + return nil + } + out := new(Devices) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
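A pattern worth noting in Devices.DeepCopyInto above: slices whose element type carries no pointers (Inputs, HostDevices) are duplicated with the builtin copy, while slices of pointer-bearing elements (Disks, Interfaces, GPUs, Filesystems) are copied element by element through DeepCopyInto, because a shallow copy would leave both slices sharing the nested pointers. A minimal sketch of the difference, reusing the FeatureState type from this file:

func exampleSliceCopy() {
	// Pointer-free elements: the builtin copy is already a complete copy.
	in := []string{"a", "b"}
	out := make([]string, len(in))
	copy(out, in)

	// Pointer-bearing elements: copy alone would alias Enabled, so the
	// generated code deep-copies each element instead.
	enabled := true
	src := []FeatureState{{Enabled: &enabled}}
	dst := make([]FeatureState, len(src))
	for i := range src {
		src[i].DeepCopyInto(&dst[i])
	}
	*dst[0].Enabled = false // src[0].Enabled still points at true
}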
+func (in *Disk) DeepCopyInto(out *Disk) { + *out = *in + in.DiskDevice.DeepCopyInto(&out.DiskDevice) + if in.BootOrder != nil { + in, out := &in.BootOrder, &out.BootOrder + *out = new(uint) + **out = **in + } + if in.DedicatedIOThread != nil { + in, out := &in.DedicatedIOThread, &out.DedicatedIOThread + *out = new(bool) + **out = **in + } + if in.BlockSize != nil { + in, out := &in.BlockSize, &out.BlockSize + *out = new(BlockSize) + (*in).DeepCopyInto(*out) + } + if in.Shareable != nil { + in, out := &in.Shareable, &out.Shareable + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Disk. +func (in *Disk) DeepCopy() *Disk { + if in == nil { + return nil + } + out := new(Disk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskDevice) DeepCopyInto(out *DiskDevice) { + *out = *in + if in.Disk != nil { + in, out := &in.Disk, &out.Disk + *out = new(DiskTarget) + **out = **in + } + if in.LUN != nil { + in, out := &in.LUN, &out.LUN + *out = new(LunTarget) + **out = **in + } + if in.CDRom != nil { + in, out := &in.CDRom, &out.CDRom + *out = new(CDRomTarget) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskDevice. +func (in *DiskDevice) DeepCopy() *DiskDevice { + if in == nil { + return nil + } + out := new(DiskDevice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskTarget) DeepCopyInto(out *DiskTarget) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskTarget. +func (in *DiskTarget) DeepCopy() *DiskTarget { + if in == nil { + return nil + } + out := new(DiskTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskVerification) DeepCopyInto(out *DiskVerification) { + *out = *in + if in.MemoryLimit != nil { + in, out := &in.MemoryLimit, &out.MemoryLimit + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskVerification. +func (in *DiskVerification) DeepCopy() *DiskVerification { + if in == nil { + return nil + } + out := new(DiskVerification) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DomainMemoryDumpInfo) DeepCopyInto(out *DomainMemoryDumpInfo) { + *out = *in + if in.StartTimestamp != nil { + in, out := &in.StartTimestamp, &out.StartTimestamp + *out = (*in).DeepCopy() + } + if in.EndTimestamp != nil { + in, out := &in.EndTimestamp, &out.EndTimestamp + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainMemoryDumpInfo. +func (in *DomainMemoryDumpInfo) DeepCopy() *DomainMemoryDumpInfo { + if in == nil { + return nil + } + out := new(DomainMemoryDumpInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DomainSpec) DeepCopyInto(out *DomainSpec) { + *out = *in + in.Resources.DeepCopyInto(&out.Resources) + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(CPU) + (*in).DeepCopyInto(*out) + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(Memory) + (*in).DeepCopyInto(*out) + } + if in.Machine != nil { + in, out := &in.Machine, &out.Machine + *out = new(Machine) + **out = **in + } + if in.Firmware != nil { + in, out := &in.Firmware, &out.Firmware + *out = new(Firmware) + (*in).DeepCopyInto(*out) + } + if in.Clock != nil { + in, out := &in.Clock, &out.Clock + *out = new(Clock) + (*in).DeepCopyInto(*out) + } + if in.Features != nil { + in, out := &in.Features, &out.Features + *out = new(Features) + (*in).DeepCopyInto(*out) + } + in.Devices.DeepCopyInto(&out.Devices) + if in.IOThreadsPolicy != nil { + in, out := &in.IOThreadsPolicy, &out.IOThreadsPolicy + *out = new(IOThreadsPolicy) + **out = **in + } + if in.Chassis != nil { + in, out := &in.Chassis, &out.Chassis + *out = new(Chassis) + **out = **in + } + if in.LaunchSecurity != nil { + in, out := &in.LaunchSecurity, &out.LaunchSecurity + *out = new(LaunchSecurity) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DomainSpec. +func (in *DomainSpec) DeepCopy() *DomainSpec { + if in == nil { + return nil + } + out := new(DomainSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownwardAPIVolumeSource) DeepCopyInto(out *DownwardAPIVolumeSource) { + *out = *in + if in.Fields != nil { + in, out := &in.Fields, &out.Fields + *out = make([]corev1.DownwardAPIVolumeFile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeSource. +func (in *DownwardAPIVolumeSource) DeepCopy() *DownwardAPIVolumeSource { + if in == nil { + return nil + } + out := new(DownwardAPIVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownwardMetricsVolumeSource) DeepCopyInto(out *DownwardMetricsVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardMetricsVolumeSource. +func (in *DownwardMetricsVolumeSource) DeepCopy() *DownwardMetricsVolumeSource { + if in == nil { + return nil + } + out := new(DownwardMetricsVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EFI) DeepCopyInto(out *EFI) { + *out = *in + if in.SecureBoot != nil { + in, out := &in.SecureBoot, &out.SecureBoot + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EFI. +func (in *EFI) DeepCopy() *EFI { + if in == nil { + return nil + } + out := new(EFI) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EmptyDiskSource) DeepCopyInto(out *EmptyDiskSource) { + *out = *in + out.Capacity = in.Capacity.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyDiskSource. +func (in *EmptyDiskSource) DeepCopy() *EmptyDiskSource { + if in == nil { + return nil + } + out := new(EmptyDiskSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralVolumeSource) DeepCopyInto(out *EphemeralVolumeSource) { + *out = *in + if in.PersistentVolumeClaim != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + *out = new(corev1.PersistentVolumeClaimVolumeSource) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralVolumeSource. +func (in *EphemeralVolumeSource) DeepCopy() *EphemeralVolumeSource { + if in == nil { + return nil + } + out := new(EphemeralVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureAPIC) DeepCopyInto(out *FeatureAPIC) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureAPIC. +func (in *FeatureAPIC) DeepCopy() *FeatureAPIC { + if in == nil { + return nil + } + out := new(FeatureAPIC) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureHyperv) DeepCopyInto(out *FeatureHyperv) { + *out = *in + if in.Relaxed != nil { + in, out := &in.Relaxed, &out.Relaxed + *out = new(FeatureState) + (*in).DeepCopyInto(*out) + } + if in.VAPIC != nil { + in, out := &in.VAPIC, &out.VAPIC + *out = new(FeatureState) + (*in).DeepCopyInto(*out) + } + if in.Spinlocks != nil { + in, out := &in.Spinlocks, &out.Spinlocks + *out = new(FeatureSpinlocks) + (*in).DeepCopyInto(*out) + } + if in.VPIndex != nil { + in, out := &in.VPIndex, &out.VPIndex + *out = new(FeatureState) + (*in).DeepCopyInto(*out) + } + if in.Runtime != nil { + in, out := &in.Runtime, &out.Runtime + *out = new(FeatureState) + (*in).DeepCopyInto(*out) + } + if in.SyNIC != nil { + in, out := &in.SyNIC, &out.SyNIC + *out = new(FeatureState) + (*in).DeepCopyInto(*out) + } + if in.SyNICTimer != nil { + in, out := &in.SyNICTimer, &out.SyNICTimer + *out = new(SyNICTimer) + (*in).DeepCopyInto(*out) + } + if in.Reset != nil { + in, out := &in.Reset, &out.Reset + *out = new(FeatureState) + (*in).DeepCopyInto(*out) + } + if in.VendorID != nil { + in, out := &in.VendorID, &out.VendorID + *out = new(FeatureVendorID) + (*in).DeepCopyInto(*out) + } + if in.Frequencies != nil { + in, out := &in.Frequencies, &out.Frequencies + *out = new(FeatureState) + (*in).DeepCopyInto(*out) + } + if in.Reenlightenment != nil { + in, out := &in.Reenlightenment, &out.Reenlightenment + *out = new(FeatureState) + (*in).DeepCopyInto(*out) + } + if in.TLBFlush != nil { + in, out := &in.TLBFlush, &out.TLBFlush + *out = new(FeatureState) + (*in).DeepCopyInto(*out) + } + if in.IPI != nil { + in, out := &in.IPI, &out.IPI + *out = new(FeatureState) + (*in).DeepCopyInto(*out) + } + if in.EVMCS != nil { + in, out := &in.EVMCS, &out.EVMCS + *out = new(FeatureState) + 
(*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureHyperv. +func (in *FeatureHyperv) DeepCopy() *FeatureHyperv { + if in == nil { + return nil + } + out := new(FeatureHyperv) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureKVM) DeepCopyInto(out *FeatureKVM) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureKVM. +func (in *FeatureKVM) DeepCopy() *FeatureKVM { + if in == nil { + return nil + } + out := new(FeatureKVM) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureSpinlocks) DeepCopyInto(out *FeatureSpinlocks) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Retries != nil { + in, out := &in.Retries, &out.Retries + *out = new(uint32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureSpinlocks. +func (in *FeatureSpinlocks) DeepCopy() *FeatureSpinlocks { + if in == nil { + return nil + } + out := new(FeatureSpinlocks) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureState) DeepCopyInto(out *FeatureState) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureState. +func (in *FeatureState) DeepCopy() *FeatureState { + if in == nil { + return nil + } + out := new(FeatureState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureVendorID) DeepCopyInto(out *FeatureVendorID) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureVendorID. +func (in *FeatureVendorID) DeepCopy() *FeatureVendorID { + if in == nil { + return nil + } + out := new(FeatureVendorID) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Features) DeepCopyInto(out *Features) { + *out = *in + in.ACPI.DeepCopyInto(&out.ACPI) + if in.APIC != nil { + in, out := &in.APIC, &out.APIC + *out = new(FeatureAPIC) + (*in).DeepCopyInto(*out) + } + if in.Hyperv != nil { + in, out := &in.Hyperv, &out.Hyperv + *out = new(FeatureHyperv) + (*in).DeepCopyInto(*out) + } + if in.SMM != nil { + in, out := &in.SMM, &out.SMM + *out = new(FeatureState) + (*in).DeepCopyInto(*out) + } + if in.KVM != nil { + in, out := &in.KVM, &out.KVM + *out = new(FeatureKVM) + **out = **in + } + if in.Pvspinlock != nil { + in, out := &in.Pvspinlock, &out.Pvspinlock + *out = new(FeatureState) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Features. 
+func (in *Features) DeepCopy() *Features { + if in == nil { + return nil + } + out := new(Features) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Filesystem) DeepCopyInto(out *Filesystem) { + *out = *in + if in.Virtiofs != nil { + in, out := &in.Virtiofs, &out.Virtiofs + *out = new(FilesystemVirtiofs) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filesystem. +func (in *Filesystem) DeepCopy() *Filesystem { + if in == nil { + return nil + } + out := new(Filesystem) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilesystemVirtiofs) DeepCopyInto(out *FilesystemVirtiofs) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemVirtiofs. +func (in *FilesystemVirtiofs) DeepCopy() *FilesystemVirtiofs { + if in == nil { + return nil + } + out := new(FilesystemVirtiofs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Firmware) DeepCopyInto(out *Firmware) { + *out = *in + if in.Bootloader != nil { + in, out := &in.Bootloader, &out.Bootloader + *out = new(Bootloader) + (*in).DeepCopyInto(*out) + } + if in.KernelBoot != nil { + in, out := &in.KernelBoot, &out.KernelBoot + *out = new(KernelBoot) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Firmware. +func (in *Firmware) DeepCopy() *Firmware { + if in == nil { + return nil + } + out := new(Firmware) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Flags) DeepCopyInto(out *Flags) { + *out = *in + if in.API != nil { + in, out := &in.API, &out.API + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Controller != nil { + in, out := &in.Controller, &out.Controller + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Handler != nil { + in, out := &in.Handler, &out.Handler + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Flags. +func (in *Flags) DeepCopy() *Flags { + if in == nil { + return nil + } + out := new(Flags) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FreezeUnfreezeTimeout) DeepCopyInto(out *FreezeUnfreezeTimeout) { + *out = *in + if in.UnfreezeTimeout != nil { + in, out := &in.UnfreezeTimeout, &out.UnfreezeTimeout + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FreezeUnfreezeTimeout. +func (in *FreezeUnfreezeTimeout) DeepCopy() *FreezeUnfreezeTimeout { + if in == nil { + return nil + } + out := new(FreezeUnfreezeTimeout) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *GPU) DeepCopyInto(out *GPU) { + *out = *in + if in.VirtualGPUOptions != nil { + in, out := &in.VirtualGPUOptions, &out.VirtualGPUOptions + *out = new(VGPUOptions) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GPU. +func (in *GPU) DeepCopy() *GPU { + if in == nil { + return nil + } + out := new(GPU) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenerationStatus) DeepCopyInto(out *GenerationStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerationStatus. +func (in *GenerationStatus) DeepCopy() *GenerationStatus { + if in == nil { + return nil + } + out := new(GenerationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GuestAgentCommandInfo) DeepCopyInto(out *GuestAgentCommandInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GuestAgentCommandInfo. +func (in *GuestAgentCommandInfo) DeepCopy() *GuestAgentCommandInfo { + if in == nil { + return nil + } + out := new(GuestAgentCommandInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GuestAgentPing) DeepCopyInto(out *GuestAgentPing) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GuestAgentPing. +func (in *GuestAgentPing) DeepCopy() *GuestAgentPing { + if in == nil { + return nil + } + out := new(GuestAgentPing) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HPETTimer) DeepCopyInto(out *HPETTimer) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPETTimer. +func (in *HPETTimer) DeepCopy() *HPETTimer { + if in == nil { + return nil + } + out := new(HPETTimer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Handler) DeepCopyInto(out *Handler) { + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(corev1.ExecAction) + (*in).DeepCopyInto(*out) + } + if in.GuestAgentPing != nil { + in, out := &in.GuestAgentPing, &out.GuestAgentPing + *out = new(GuestAgentPing) + **out = **in + } + if in.HTTPGet != nil { + in, out := &in.HTTPGet, &out.HTTPGet + *out = new(corev1.HTTPGetAction) + (*in).DeepCopyInto(*out) + } + if in.TCPSocket != nil { + in, out := &in.TCPSocket, &out.TCPSocket + *out = new(corev1.TCPSocketAction) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Handler. +func (in *Handler) DeepCopy() *Handler { + if in == nil { + return nil + } + out := new(Handler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HostDevice) DeepCopyInto(out *HostDevice) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostDevice. +func (in *HostDevice) DeepCopy() *HostDevice { + if in == nil { + return nil + } + out := new(HostDevice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostDisk) DeepCopyInto(out *HostDisk) { + *out = *in + out.Capacity = in.Capacity.DeepCopy() + if in.Shared != nil { + in, out := &in.Shared, &out.Shared + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostDisk. +func (in *HostDisk) DeepCopy() *HostDisk { + if in == nil { + return nil + } + out := new(HostDisk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HotplugVolumeSource) DeepCopyInto(out *HotplugVolumeSource) { + *out = *in + if in.PersistentVolumeClaim != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + *out = new(PersistentVolumeClaimVolumeSource) + **out = **in + } + if in.DataVolume != nil { + in, out := &in.DataVolume, &out.DataVolume + *out = new(DataVolumeSource) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HotplugVolumeSource. +func (in *HotplugVolumeSource) DeepCopy() *HotplugVolumeSource { + if in == nil { + return nil + } + out := new(HotplugVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HotplugVolumeStatus) DeepCopyInto(out *HotplugVolumeStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HotplugVolumeStatus. +func (in *HotplugVolumeStatus) DeepCopy() *HotplugVolumeStatus { + if in == nil { + return nil + } + out := new(HotplugVolumeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Hugepages) DeepCopyInto(out *Hugepages) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hugepages. +func (in *Hugepages) DeepCopy() *Hugepages { + if in == nil { + return nil + } + out := new(Hugepages) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HypervTimer) DeepCopyInto(out *HypervTimer) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HypervTimer. +func (in *HypervTimer) DeepCopy() *HypervTimer { + if in == nil { + return nil + } + out := new(HypervTimer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *I6300ESBWatchdog) DeepCopyInto(out *I6300ESBWatchdog) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new I6300ESBWatchdog. 
+func (in *I6300ESBWatchdog) DeepCopy() *I6300ESBWatchdog { + if in == nil { + return nil + } + out := new(I6300ESBWatchdog) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Input) DeepCopyInto(out *Input) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Input. +func (in *Input) DeepCopy() *Input { + if in == nil { + return nil + } + out := new(Input) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstancetypeMatcher) DeepCopyInto(out *InstancetypeMatcher) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstancetypeMatcher. +func (in *InstancetypeMatcher) DeepCopy() *InstancetypeMatcher { + if in == nil { + return nil + } + out := new(InstancetypeMatcher) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Interface) DeepCopyInto(out *Interface) { + *out = *in + in.InterfaceBindingMethod.DeepCopyInto(&out.InterfaceBindingMethod) + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]Port, len(*in)) + copy(*out, *in) + } + if in.BootOrder != nil { + in, out := &in.BootOrder, &out.BootOrder + *out = new(uint) + **out = **in + } + if in.DHCPOptions != nil { + in, out := &in.DHCPOptions, &out.DHCPOptions + *out = new(DHCPOptions) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Interface. +func (in *Interface) DeepCopy() *Interface { + if in == nil { + return nil + } + out := new(Interface) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InterfaceBindingMethod) DeepCopyInto(out *InterfaceBindingMethod) { + *out = *in + if in.Bridge != nil { + in, out := &in.Bridge, &out.Bridge + *out = new(InterfaceBridge) + **out = **in + } + if in.Slirp != nil { + in, out := &in.Slirp, &out.Slirp + *out = new(InterfaceSlirp) + **out = **in + } + if in.Masquerade != nil { + in, out := &in.Masquerade, &out.Masquerade + *out = new(InterfaceMasquerade) + **out = **in + } + if in.SRIOV != nil { + in, out := &in.SRIOV, &out.SRIOV + *out = new(InterfaceSRIOV) + **out = **in + } + if in.Macvtap != nil { + in, out := &in.Macvtap, &out.Macvtap + *out = new(InterfaceMacvtap) + **out = **in + } + if in.Passt != nil { + in, out := &in.Passt, &out.Passt + *out = new(InterfacePasst) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterfaceBindingMethod. +func (in *InterfaceBindingMethod) DeepCopy() *InterfaceBindingMethod { + if in == nil { + return nil + } + out := new(InterfaceBindingMethod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InterfaceBridge) DeepCopyInto(out *InterfaceBridge) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterfaceBridge. 
+func (in *InterfaceBridge) DeepCopy() *InterfaceBridge { + if in == nil { + return nil + } + out := new(InterfaceBridge) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InterfaceMacvtap) DeepCopyInto(out *InterfaceMacvtap) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterfaceMacvtap. +func (in *InterfaceMacvtap) DeepCopy() *InterfaceMacvtap { + if in == nil { + return nil + } + out := new(InterfaceMacvtap) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InterfaceMasquerade) DeepCopyInto(out *InterfaceMasquerade) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterfaceMasquerade. +func (in *InterfaceMasquerade) DeepCopy() *InterfaceMasquerade { + if in == nil { + return nil + } + out := new(InterfaceMasquerade) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InterfacePasst) DeepCopyInto(out *InterfacePasst) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterfacePasst. +func (in *InterfacePasst) DeepCopy() *InterfacePasst { + if in == nil { + return nil + } + out := new(InterfacePasst) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InterfaceSRIOV) DeepCopyInto(out *InterfaceSRIOV) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterfaceSRIOV. +func (in *InterfaceSRIOV) DeepCopy() *InterfaceSRIOV { + if in == nil { + return nil + } + out := new(InterfaceSRIOV) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InterfaceSlirp) DeepCopyInto(out *InterfaceSlirp) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterfaceSlirp. +func (in *InterfaceSlirp) DeepCopy() *InterfaceSlirp { + if in == nil { + return nil + } + out := new(InterfaceSlirp) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KVMTimer) DeepCopyInto(out *KVMTimer) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KVMTimer. +func (in *KVMTimer) DeepCopy() *KVMTimer { + if in == nil { + return nil + } + out := new(KVMTimer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KernelBoot) DeepCopyInto(out *KernelBoot) { + *out = *in + if in.Container != nil { + in, out := &in.Container, &out.Container + *out = new(KernelBootContainer) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KernelBoot. 
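+//
+// Editorial sketch (hypothetical image values): the optional Container
+// pointer is re-allocated by DeepCopyInto above, so mutating the copy cannot
+// leak into the original:
+//
+//	orig := &KernelBoot{Container: &KernelBootContainer{Image: "kernel:v1"}}
+//	cp := orig.DeepCopy()
+//	cp.Container.Image = "kernel:v2" // orig.Container.Image stays "kernel:v1"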
+func (in *KernelBoot) DeepCopy() *KernelBoot { + if in == nil { + return nil + } + out := new(KernelBoot) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KernelBootContainer) DeepCopyInto(out *KernelBootContainer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KernelBootContainer. +func (in *KernelBootContainer) DeepCopy() *KernelBootContainer { + if in == nil { + return nil + } + out := new(KernelBootContainer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeVirt) DeepCopyInto(out *KubeVirt) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeVirt. +func (in *KubeVirt) DeepCopy() *KubeVirt { + if in == nil { + return nil + } + out := new(KubeVirt) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeVirt) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeVirtCertificateRotateStrategy) DeepCopyInto(out *KubeVirtCertificateRotateStrategy) { + *out = *in + if in.SelfSigned != nil { + in, out := &in.SelfSigned, &out.SelfSigned + *out = new(KubeVirtSelfSignConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeVirtCertificateRotateStrategy. +func (in *KubeVirtCertificateRotateStrategy) DeepCopy() *KubeVirtCertificateRotateStrategy { + if in == nil { + return nil + } + out := new(KubeVirtCertificateRotateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeVirtCondition) DeepCopyInto(out *KubeVirtCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeVirtCondition. +func (in *KubeVirtCondition) DeepCopy() *KubeVirtCondition { + if in == nil { + return nil + } + out := new(KubeVirtCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
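+//
+// Editorial note (usage sketch): DeepCopyInto writes into caller-owned
+// storage, which lets hot paths reuse a scratch value instead of allocating;
+// per the contract in the comment above, the receiver must be non-nil and
+// out must point at a valid struct:
+//
+//	var scratch KubeVirtConfiguration
+//	cfg.DeepCopyInto(&scratch) // cfg is a non-nil *KubeVirtConfiguration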
+func (in *KubeVirtConfiguration) DeepCopyInto(out *KubeVirtConfiguration) { + *out = *in + if in.CPURequest != nil { + in, out := &in.CPURequest, &out.CPURequest + x := (*in).DeepCopy() + *out = &x + } + if in.DeveloperConfiguration != nil { + in, out := &in.DeveloperConfiguration, &out.DeveloperConfiguration + *out = new(DeveloperConfiguration) + (*in).DeepCopyInto(*out) + } + if in.EmulatedMachines != nil { + in, out := &in.EmulatedMachines, &out.EmulatedMachines + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.MigrationConfiguration != nil { + in, out := &in.MigrationConfiguration, &out.MigrationConfiguration + *out = new(MigrationConfiguration) + (*in).DeepCopyInto(*out) + } + if in.NetworkConfiguration != nil { + in, out := &in.NetworkConfiguration, &out.NetworkConfiguration + *out = new(NetworkConfiguration) + (*in).DeepCopyInto(*out) + } + if in.SMBIOSConfig != nil { + in, out := &in.SMBIOSConfig, &out.SMBIOSConfig + *out = new(SMBiosConfiguration) + **out = **in + } + if in.EvictionStrategy != nil { + in, out := &in.EvictionStrategy, &out.EvictionStrategy + *out = new(EvictionStrategy) + **out = **in + } + if in.SupportedGuestAgentVersions != nil { + in, out := &in.SupportedGuestAgentVersions, &out.SupportedGuestAgentVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.MemBalloonStatsPeriod != nil { + in, out := &in.MemBalloonStatsPeriod, &out.MemBalloonStatsPeriod + *out = new(uint32) + **out = **in + } + if in.PermittedHostDevices != nil { + in, out := &in.PermittedHostDevices, &out.PermittedHostDevices + *out = new(PermittedHostDevices) + (*in).DeepCopyInto(*out) + } + if in.MediatedDevicesConfiguration != nil { + in, out := &in.MediatedDevicesConfiguration, &out.MediatedDevicesConfiguration + *out = new(MediatedDevicesConfiguration) + (*in).DeepCopyInto(*out) + } + if in.ObsoleteCPUModels != nil { + in, out := &in.ObsoleteCPUModels, &out.ObsoleteCPUModels + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.VirtualMachineInstancesPerNode != nil { + in, out := &in.VirtualMachineInstancesPerNode, &out.VirtualMachineInstancesPerNode + *out = new(int) + **out = **in + } + if in.APIConfiguration != nil { + in, out := &in.APIConfiguration, &out.APIConfiguration + *out = new(ReloadableComponentConfiguration) + (*in).DeepCopyInto(*out) + } + if in.WebhookConfiguration != nil { + in, out := &in.WebhookConfiguration, &out.WebhookConfiguration + *out = new(ReloadableComponentConfiguration) + (*in).DeepCopyInto(*out) + } + if in.ControllerConfiguration != nil { + in, out := &in.ControllerConfiguration, &out.ControllerConfiguration + *out = new(ReloadableComponentConfiguration) + (*in).DeepCopyInto(*out) + } + if in.HandlerConfiguration != nil { + in, out := &in.HandlerConfiguration, &out.HandlerConfiguration + *out = new(ReloadableComponentConfiguration) + (*in).DeepCopyInto(*out) + } + if in.TLSConfiguration != nil { + in, out := &in.TLSConfiguration, &out.TLSConfiguration + *out = new(TLSConfiguration) + (*in).DeepCopyInto(*out) + } + if in.SeccompConfiguration != nil { + in, out := &in.SeccompConfiguration, &out.SeccompConfiguration + *out = new(SeccompConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeVirtConfiguration. 
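+//
+// Editorial sketch (hypothetical CPU model name): reference fields such as
+// the ObsoleteCPUModels map get fresh backing storage, so the copy can be
+// edited, e.g. by defaulting or webhook code, without aliasing the live
+// object:
+//
+//	cp := cfg.DeepCopy()
+//	if cp.ObsoleteCPUModels == nil {
+//		cp.ObsoleteCPUModels = map[string]bool{}
+//	}
+//	cp.ObsoleteCPUModels["penryn"] = true // cfg's map is untouched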
+func (in *KubeVirtConfiguration) DeepCopy() *KubeVirtConfiguration { + if in == nil { + return nil + } + out := new(KubeVirtConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeVirtList) DeepCopyInto(out *KubeVirtList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KubeVirt, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeVirtList. +func (in *KubeVirtList) DeepCopy() *KubeVirtList { + if in == nil { + return nil + } + out := new(KubeVirtList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeVirtList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeVirtSelfSignConfiguration) DeepCopyInto(out *KubeVirtSelfSignConfiguration) { + *out = *in + if in.CARotateInterval != nil { + in, out := &in.CARotateInterval, &out.CARotateInterval + *out = new(metav1.Duration) + **out = **in + } + if in.CertRotateInterval != nil { + in, out := &in.CertRotateInterval, &out.CertRotateInterval + *out = new(metav1.Duration) + **out = **in + } + if in.CAOverlapInterval != nil { + in, out := &in.CAOverlapInterval, &out.CAOverlapInterval + *out = new(metav1.Duration) + **out = **in + } + if in.CA != nil { + in, out := &in.CA, &out.CA + *out = new(CertConfig) + (*in).DeepCopyInto(*out) + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(CertConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeVirtSelfSignConfiguration. +func (in *KubeVirtSelfSignConfiguration) DeepCopy() *KubeVirtSelfSignConfiguration { + if in == nil { + return nil + } + out := new(KubeVirtSelfSignConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeVirtSpec) DeepCopyInto(out *KubeVirtSpec) { + *out = *in + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]corev1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + in.WorkloadUpdateStrategy.DeepCopyInto(&out.WorkloadUpdateStrategy) + in.CertificateRotationStrategy.DeepCopyInto(&out.CertificateRotationStrategy) + in.Configuration.DeepCopyInto(&out.Configuration) + if in.Infra != nil { + in, out := &in.Infra, &out.Infra + *out = new(ComponentConfig) + (*in).DeepCopyInto(*out) + } + if in.Workloads != nil { + in, out := &in.Workloads, &out.Workloads + *out = new(ComponentConfig) + (*in).DeepCopyInto(*out) + } + in.CustomizeComponents.DeepCopyInto(&out.CustomizeComponents) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeVirtSpec. 
+func (in *KubeVirtSpec) DeepCopy() *KubeVirtSpec { + if in == nil { + return nil + } + out := new(KubeVirtSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeVirtStatus) DeepCopyInto(out *KubeVirtStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]KubeVirtCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OutdatedVirtualMachineInstanceWorkloads != nil { + in, out := &in.OutdatedVirtualMachineInstanceWorkloads, &out.OutdatedVirtualMachineInstanceWorkloads + *out = new(int) + **out = **in + } + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + if in.Generations != nil { + in, out := &in.Generations, &out.Generations + *out = make([]GenerationStatus, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeVirtStatus. +func (in *KubeVirtStatus) DeepCopy() *KubeVirtStatus { + if in == nil { + return nil + } + out := new(KubeVirtStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeVirtWorkloadUpdateStrategy) DeepCopyInto(out *KubeVirtWorkloadUpdateStrategy) { + *out = *in + if in.WorkloadUpdateMethods != nil { + in, out := &in.WorkloadUpdateMethods, &out.WorkloadUpdateMethods + *out = make([]WorkloadUpdateMethod, len(*in)) + copy(*out, *in) + } + if in.BatchEvictionSize != nil { + in, out := &in.BatchEvictionSize, &out.BatchEvictionSize + *out = new(int) + **out = **in + } + if in.BatchEvictionInterval != nil { + in, out := &in.BatchEvictionInterval, &out.BatchEvictionInterval + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeVirtWorkloadUpdateStrategy. +func (in *KubeVirtWorkloadUpdateStrategy) DeepCopy() *KubeVirtWorkloadUpdateStrategy { + if in == nil { + return nil + } + out := new(KubeVirtWorkloadUpdateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LaunchSecurity) DeepCopyInto(out *LaunchSecurity) { + *out = *in + if in.SEV != nil { + in, out := &in.SEV, &out.SEV + *out = new(SEV) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LaunchSecurity. +func (in *LaunchSecurity) DeepCopy() *LaunchSecurity { + if in == nil { + return nil + } + out := new(LaunchSecurity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogVerbosity) DeepCopyInto(out *LogVerbosity) { + *out = *in + if in.NodeVerbosity != nil { + in, out := &in.NodeVerbosity, &out.NodeVerbosity + *out = make(map[string]uint, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogVerbosity. 
+func (in *LogVerbosity) DeepCopy() *LogVerbosity { + if in == nil { + return nil + } + out := new(LogVerbosity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LunTarget) DeepCopyInto(out *LunTarget) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LunTarget. +func (in *LunTarget) DeepCopy() *LunTarget { + if in == nil { + return nil + } + out := new(LunTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Machine) DeepCopyInto(out *Machine) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Machine. +func (in *Machine) DeepCopy() *Machine { + if in == nil { + return nil + } + out := new(Machine) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MediatedDevicesConfiguration) DeepCopyInto(out *MediatedDevicesConfiguration) { + *out = *in + if in.MediatedDevicesTypes != nil { + in, out := &in.MediatedDevicesTypes, &out.MediatedDevicesTypes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.MediatedDeviceTypes != nil { + in, out := &in.MediatedDeviceTypes, &out.MediatedDeviceTypes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NodeMediatedDeviceTypes != nil { + in, out := &in.NodeMediatedDeviceTypes, &out.NodeMediatedDeviceTypes + *out = make([]NodeMediatedDeviceTypesConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediatedDevicesConfiguration. +func (in *MediatedDevicesConfiguration) DeepCopy() *MediatedDevicesConfiguration { + if in == nil { + return nil + } + out := new(MediatedDevicesConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MediatedHostDevice) DeepCopyInto(out *MediatedHostDevice) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MediatedHostDevice. +func (in *MediatedHostDevice) DeepCopy() *MediatedHostDevice { + if in == nil { + return nil + } + out := new(MediatedHostDevice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Memory) DeepCopyInto(out *Memory) { + *out = *in + if in.Hugepages != nil { + in, out := &in.Hugepages, &out.Hugepages + *out = new(Hugepages) + **out = **in + } + if in.Guest != nil { + in, out := &in.Guest, &out.Guest + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Memory. +func (in *Memory) DeepCopy() *Memory { + if in == nil { + return nil + } + out := new(Memory) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
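+//
+// Editorial note: the embedded PersistentVolumeClaimVolumeSource below is
+// copied by plain struct assignment; that is already a deep copy because its
+// fields are scalars, so deepcopy-gen emits per-field logic only where
+// pointers, slices, or maps are involved.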
+func (in *MemoryDumpVolumeSource) DeepCopyInto(out *MemoryDumpVolumeSource) { + *out = *in + out.PersistentVolumeClaimVolumeSource = in.PersistentVolumeClaimVolumeSource + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryDumpVolumeSource. +func (in *MemoryDumpVolumeSource) DeepCopy() *MemoryDumpVolumeSource { + if in == nil { + return nil + } + out := new(MemoryDumpVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MigrateOptions) DeepCopyInto(out *MigrateOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MigrateOptions. +func (in *MigrateOptions) DeepCopy() *MigrateOptions { + if in == nil { + return nil + } + out := new(MigrateOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MigrationConfiguration) DeepCopyInto(out *MigrationConfiguration) { + *out = *in + if in.NodeDrainTaintKey != nil { + in, out := &in.NodeDrainTaintKey, &out.NodeDrainTaintKey + *out = new(string) + **out = **in + } + if in.ParallelOutboundMigrationsPerNode != nil { + in, out := &in.ParallelOutboundMigrationsPerNode, &out.ParallelOutboundMigrationsPerNode + *out = new(uint32) + **out = **in + } + if in.ParallelMigrationsPerCluster != nil { + in, out := &in.ParallelMigrationsPerCluster, &out.ParallelMigrationsPerCluster + *out = new(uint32) + **out = **in + } + if in.AllowAutoConverge != nil { + in, out := &in.AllowAutoConverge, &out.AllowAutoConverge + *out = new(bool) + **out = **in + } + if in.BandwidthPerMigration != nil { + in, out := &in.BandwidthPerMigration, &out.BandwidthPerMigration + x := (*in).DeepCopy() + *out = &x + } + if in.CompletionTimeoutPerGiB != nil { + in, out := &in.CompletionTimeoutPerGiB, &out.CompletionTimeoutPerGiB + *out = new(int64) + **out = **in + } + if in.ProgressTimeout != nil { + in, out := &in.ProgressTimeout, &out.ProgressTimeout + *out = new(int64) + **out = **in + } + if in.UnsafeMigrationOverride != nil { + in, out := &in.UnsafeMigrationOverride, &out.UnsafeMigrationOverride + *out = new(bool) + **out = **in + } + if in.AllowPostCopy != nil { + in, out := &in.AllowPostCopy, &out.AllowPostCopy + *out = new(bool) + **out = **in + } + if in.DisableTLS != nil { + in, out := &in.DisableTLS, &out.DisableTLS + *out = new(bool) + **out = **in + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MigrationConfiguration. +func (in *MigrationConfiguration) DeepCopy() *MigrationConfiguration { + if in == nil { + return nil + } + out := new(MigrationConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MultusNetwork) DeepCopyInto(out *MultusNetwork) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MultusNetwork. 
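+//
+// Editorial note: MultusNetwork carries only value fields (NetworkName,
+// Default, per the core/v1 types), so its DeepCopyInto reduces to the single
+// assignment *out = *in; DeepCopy below simply wraps that with allocation
+// and the nil guard.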
+func (in *MultusNetwork) DeepCopy() *MultusNetwork { + if in == nil { + return nil + } + out := new(MultusNetwork) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NUMA) DeepCopyInto(out *NUMA) { + *out = *in + if in.GuestMappingPassthrough != nil { + in, out := &in.GuestMappingPassthrough, &out.GuestMappingPassthrough + *out = new(NUMAGuestMappingPassthrough) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NUMA. +func (in *NUMA) DeepCopy() *NUMA { + if in == nil { + return nil + } + out := new(NUMA) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NUMAGuestMappingPassthrough) DeepCopyInto(out *NUMAGuestMappingPassthrough) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NUMAGuestMappingPassthrough. +func (in *NUMAGuestMappingPassthrough) DeepCopy() *NUMAGuestMappingPassthrough { + if in == nil { + return nil + } + out := new(NUMAGuestMappingPassthrough) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Network) DeepCopyInto(out *Network) { + *out = *in + in.NetworkSource.DeepCopyInto(&out.NetworkSource) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network. +func (in *Network) DeepCopy() *Network { + if in == nil { + return nil + } + out := new(Network) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkConfiguration) DeepCopyInto(out *NetworkConfiguration) { + *out = *in + if in.PermitSlirpInterface != nil { + in, out := &in.PermitSlirpInterface, &out.PermitSlirpInterface + *out = new(bool) + **out = **in + } + if in.PermitBridgeInterfaceOnPodNetwork != nil { + in, out := &in.PermitBridgeInterfaceOnPodNetwork, &out.PermitBridgeInterfaceOnPodNetwork + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkConfiguration. +func (in *NetworkConfiguration) DeepCopy() *NetworkConfiguration { + if in == nil { + return nil + } + out := new(NetworkConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSource) DeepCopyInto(out *NetworkSource) { + *out = *in + if in.Pod != nil { + in, out := &in.Pod, &out.Pod + *out = new(PodNetwork) + **out = **in + } + if in.Multus != nil { + in, out := &in.Multus, &out.Multus + *out = new(MultusNetwork) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSource. +func (in *NetworkSource) DeepCopy() *NetworkSource { + if in == nil { + return nil + } + out := new(NetworkSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeMediatedDeviceTypesConfig) DeepCopyInto(out *NodeMediatedDeviceTypesConfig) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.MediatedDevicesTypes != nil { + in, out := &in.MediatedDevicesTypes, &out.MediatedDevicesTypes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.MediatedDeviceTypes != nil { + in, out := &in.MediatedDeviceTypes, &out.MediatedDeviceTypes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeMediatedDeviceTypesConfig. +func (in *NodeMediatedDeviceTypesConfig) DeepCopy() *NodeMediatedDeviceTypesConfig { + if in == nil { + return nil + } + out := new(NodeMediatedDeviceTypesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePlacement) DeepCopyInto(out *NodePlacement) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(corev1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePlacement. +func (in *NodePlacement) DeepCopy() *NodePlacement { + if in == nil { + return nil + } + out := new(NodePlacement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PITTimer) DeepCopyInto(out *PITTimer) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PITTimer. +func (in *PITTimer) DeepCopy() *PITTimer { + if in == nil { + return nil + } + out := new(PITTimer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PauseOptions) DeepCopyInto(out *PauseOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PauseOptions. +func (in *PauseOptions) DeepCopy() *PauseOptions { + if in == nil { + return nil + } + out := new(PauseOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PciHostDevice) DeepCopyInto(out *PciHostDevice) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PciHostDevice. 
+func (in *PciHostDevice) DeepCopy() *PciHostDevice { + if in == nil { + return nil + } + out := new(PciHostDevice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermittedHostDevices) DeepCopyInto(out *PermittedHostDevices) { + *out = *in + if in.PciHostDevices != nil { + in, out := &in.PciHostDevices, &out.PciHostDevices + *out = make([]PciHostDevice, len(*in)) + copy(*out, *in) + } + if in.MediatedDevices != nil { + in, out := &in.MediatedDevices, &out.MediatedDevices + *out = make([]MediatedHostDevice, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermittedHostDevices. +func (in *PermittedHostDevices) DeepCopy() *PermittedHostDevices { + if in == nil { + return nil + } + out := new(PermittedHostDevices) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimInfo) DeepCopyInto(out *PersistentVolumeClaimInfo) { + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]corev1.PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.VolumeMode != nil { + in, out := &in.VolumeMode, &out.VolumeMode + *out = new(corev1.PersistentVolumeMode) + **out = **in + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.FilesystemOverhead != nil { + in, out := &in.FilesystemOverhead, &out.FilesystemOverhead + *out = new(v1beta1.Percent) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimInfo. +func (in *PersistentVolumeClaimInfo) DeepCopy() *PersistentVolumeClaimInfo { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimVolumeSource) DeepCopyInto(out *PersistentVolumeClaimVolumeSource) { + *out = *in + out.PersistentVolumeClaimVolumeSource = in.PersistentVolumeClaimVolumeSource + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimVolumeSource. +func (in *PersistentVolumeClaimVolumeSource) DeepCopy() *PersistentVolumeClaimVolumeSource { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodNetwork) DeepCopyInto(out *PodNetwork) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodNetwork. +func (in *PodNetwork) DeepCopy() *PodNetwork { + if in == nil { + return nil + } + out := new(PodNetwork) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Port) DeepCopyInto(out *Port) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Port. +func (in *Port) DeepCopy() *Port { + if in == nil { + return nil + } + out := new(Port) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreferenceMatcher) DeepCopyInto(out *PreferenceMatcher) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferenceMatcher. +func (in *PreferenceMatcher) DeepCopy() *PreferenceMatcher { + if in == nil { + return nil + } + out := new(PreferenceMatcher) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Probe) DeepCopyInto(out *Probe) { + *out = *in + in.Handler.DeepCopyInto(&out.Handler) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe. +func (in *Probe) DeepCopy() *Probe { + if in == nil { + return nil + } + out := new(Probe) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProfilerResult) DeepCopyInto(out *ProfilerResult) { + *out = *in + if in.PprofData != nil { + in, out := &in.PprofData, &out.PprofData + *out = make(map[string][]byte, len(*in)) + for key, val := range *in { + var outVal []byte + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]byte, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProfilerResult. +func (in *ProfilerResult) DeepCopy() *ProfilerResult { + if in == nil { + return nil + } + out := new(ProfilerResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QemuGuestAgentSSHPublicKeyAccessCredentialPropagation) DeepCopyInto(out *QemuGuestAgentSSHPublicKeyAccessCredentialPropagation) { + *out = *in + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QemuGuestAgentSSHPublicKeyAccessCredentialPropagation. +func (in *QemuGuestAgentSSHPublicKeyAccessCredentialPropagation) DeepCopy() *QemuGuestAgentSSHPublicKeyAccessCredentialPropagation { + if in == nil { + return nil + } + out := new(QemuGuestAgentSSHPublicKeyAccessCredentialPropagation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QemuGuestAgentUserPasswordAccessCredentialPropagation) DeepCopyInto(out *QemuGuestAgentUserPasswordAccessCredentialPropagation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QemuGuestAgentUserPasswordAccessCredentialPropagation. 
+func (in *QemuGuestAgentUserPasswordAccessCredentialPropagation) DeepCopy() *QemuGuestAgentUserPasswordAccessCredentialPropagation { + if in == nil { + return nil + } + out := new(QemuGuestAgentUserPasswordAccessCredentialPropagation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RESTClientConfiguration) DeepCopyInto(out *RESTClientConfiguration) { + *out = *in + if in.RateLimiter != nil { + in, out := &in.RateLimiter, &out.RateLimiter + *out = new(RateLimiter) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RESTClientConfiguration. +func (in *RESTClientConfiguration) DeepCopy() *RESTClientConfiguration { + if in == nil { + return nil + } + out := new(RESTClientConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RTCTimer) DeepCopyInto(out *RTCTimer) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RTCTimer. +func (in *RTCTimer) DeepCopy() *RTCTimer { + if in == nil { + return nil + } + out := new(RTCTimer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RateLimiter) DeepCopyInto(out *RateLimiter) { + *out = *in + if in.TokenBucketRateLimiter != nil { + in, out := &in.TokenBucketRateLimiter, &out.TokenBucketRateLimiter + *out = new(TokenBucketRateLimiter) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RateLimiter. +func (in *RateLimiter) DeepCopy() *RateLimiter { + if in == nil { + return nil + } + out := new(RateLimiter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Realtime) DeepCopyInto(out *Realtime) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Realtime. +func (in *Realtime) DeepCopy() *Realtime { + if in == nil { + return nil + } + out := new(Realtime) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReloadableComponentConfiguration) DeepCopyInto(out *ReloadableComponentConfiguration) { + *out = *in + if in.RestClient != nil { + in, out := &in.RestClient, &out.RestClient + *out = new(RESTClientConfiguration) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReloadableComponentConfiguration. +func (in *ReloadableComponentConfiguration) DeepCopy() *ReloadableComponentConfiguration { + if in == nil { + return nil + } + out := new(ReloadableComponentConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
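+//
+// Editorial sketch (hypothetical caller and volume name): the DryRun slice
+// is copied into a fresh backing array, so appends on either side stay
+// independent:
+//
+//	opts := &RemoveVolumeOptions{Name: "hotplug-disk", DryRun: []string{metav1.DryRunAll}}
+//	var cp RemoveVolumeOptions
+//	opts.DeepCopyInto(&cp)
+//	cp.DryRun = append(cp.DryRun, "extra") // opts.DryRun keeps one element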
+func (in *RemoveVolumeOptions) DeepCopyInto(out *RemoveVolumeOptions) { + *out = *in + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoveVolumeOptions. +func (in *RemoveVolumeOptions) DeepCopy() *RemoveVolumeOptions { + if in == nil { + return nil + } + out := new(RemoveVolumeOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) { + *out = *in + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(corev1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements. +func (in *ResourceRequirements) DeepCopy() *ResourceRequirements { + if in == nil { + return nil + } + out := new(ResourceRequirements) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestartOptions) DeepCopyInto(out *RestartOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.GracePeriodSeconds != nil { + in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestartOptions. +func (in *RestartOptions) DeepCopy() *RestartOptions { + if in == nil { + return nil + } + out := new(RestartOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Rng) DeepCopyInto(out *Rng) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rng. +func (in *Rng) DeepCopy() *Rng { + if in == nil { + return nil + } + out := new(Rng) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SEV) DeepCopyInto(out *SEV) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SEV. +func (in *SEV) DeepCopy() *SEV { + if in == nil { + return nil + } + out := new(SEV) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SMBiosConfiguration) DeepCopyInto(out *SMBiosConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SMBiosConfiguration. +func (in *SMBiosConfiguration) DeepCopy() *SMBiosConfiguration { + if in == nil { + return nil + } + out := new(SMBiosConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
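+//
+// Editorial note: for struct-valued fields that themselves hold reference
+// data, the generator delegates instead of inlining, e.g.
+// in.Source.DeepCopyInto(&out.Source) below, so each type's copy logic stays
+// local to its own generated method.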
+func (in *SSHPublicKeyAccessCredential) DeepCopyInto(out *SSHPublicKeyAccessCredential) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + in.PropagationMethod.DeepCopyInto(&out.PropagationMethod) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSHPublicKeyAccessCredential. +func (in *SSHPublicKeyAccessCredential) DeepCopy() *SSHPublicKeyAccessCredential { + if in == nil { + return nil + } + out := new(SSHPublicKeyAccessCredential) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SSHPublicKeyAccessCredentialPropagationMethod) DeepCopyInto(out *SSHPublicKeyAccessCredentialPropagationMethod) { + *out = *in + if in.ConfigDrive != nil { + in, out := &in.ConfigDrive, &out.ConfigDrive + *out = new(ConfigDriveSSHPublicKeyAccessCredentialPropagation) + **out = **in + } + if in.QemuGuestAgent != nil { + in, out := &in.QemuGuestAgent, &out.QemuGuestAgent + *out = new(QemuGuestAgentSSHPublicKeyAccessCredentialPropagation) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSHPublicKeyAccessCredentialPropagationMethod. +func (in *SSHPublicKeyAccessCredentialPropagationMethod) DeepCopy() *SSHPublicKeyAccessCredentialPropagationMethod { + if in == nil { + return nil + } + out := new(SSHPublicKeyAccessCredentialPropagationMethod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SSHPublicKeyAccessCredentialSource) DeepCopyInto(out *SSHPublicKeyAccessCredentialSource) { + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(AccessCredentialSecretSource) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSHPublicKeyAccessCredentialSource. +func (in *SSHPublicKeyAccessCredentialSource) DeepCopy() *SSHPublicKeyAccessCredentialSource { + if in == nil { + return nil + } + out := new(SSHPublicKeyAccessCredentialSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScreenshotOptions) DeepCopyInto(out *ScreenshotOptions) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScreenshotOptions. +func (in *ScreenshotOptions) DeepCopy() *ScreenshotOptions { + if in == nil { + return nil + } + out := new(ScreenshotOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SeccompConfiguration) DeepCopyInto(out *SeccompConfiguration) { + *out = *in + if in.VirtualMachineInstanceProfile != nil { + in, out := &in.VirtualMachineInstanceProfile, &out.VirtualMachineInstanceProfile + *out = new(VirtualMachineInstanceProfile) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SeccompConfiguration. +func (in *SeccompConfiguration) DeepCopy() *SeccompConfiguration { + if in == nil { + return nil + } + out := new(SeccompConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *SecretVolumeSource) DeepCopyInto(out *SecretVolumeSource) { + *out = *in + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVolumeSource. +func (in *SecretVolumeSource) DeepCopy() *SecretVolumeSource { + if in == nil { + return nil + } + out := new(SecretVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountVolumeSource) DeepCopyInto(out *ServiceAccountVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountVolumeSource. +func (in *ServiceAccountVolumeSource) DeepCopy() *ServiceAccountVolumeSource { + if in == nil { + return nil + } + out := new(ServiceAccountVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SoundDevice) DeepCopyInto(out *SoundDevice) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SoundDevice. +func (in *SoundDevice) DeepCopy() *SoundDevice { + if in == nil { + return nil + } + out := new(SoundDevice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StartOptions) DeepCopyInto(out *StartOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StartOptions. +func (in *StartOptions) DeepCopy() *StartOptions { + if in == nil { + return nil + } + out := new(StartOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StopOptions) DeepCopyInto(out *StopOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.GracePeriod != nil { + in, out := &in.GracePeriod, &out.GracePeriod + *out = new(int64) + **out = **in + } + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StopOptions. +func (in *StopOptions) DeepCopy() *StopOptions { + if in == nil { + return nil + } + out := new(StopOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyNICTimer) DeepCopyInto(out *SyNICTimer) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.Direct != nil { + in, out := &in.Direct, &out.Direct + *out = new(FeatureState) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyNICTimer. 
+func (in *SyNICTimer) DeepCopy() *SyNICTimer { + if in == nil { + return nil + } + out := new(SyNICTimer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SysprepSource) DeepCopyInto(out *SysprepSource) { + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(corev1.LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SysprepSource. +func (in *SysprepSource) DeepCopy() *SysprepSource { + if in == nil { + return nil + } + out := new(SysprepSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSConfiguration) DeepCopyInto(out *TLSConfiguration) { + *out = *in + if in.Ciphers != nil { + in, out := &in.Ciphers, &out.Ciphers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfiguration. +func (in *TLSConfiguration) DeepCopy() *TLSConfiguration { + if in == nil { + return nil + } + out := new(TLSConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TPMDevice) DeepCopyInto(out *TPMDevice) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TPMDevice. +func (in *TPMDevice) DeepCopy() *TPMDevice { + if in == nil { + return nil + } + out := new(TPMDevice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Timer) DeepCopyInto(out *Timer) { + *out = *in + if in.HPET != nil { + in, out := &in.HPET, &out.HPET + *out = new(HPETTimer) + (*in).DeepCopyInto(*out) + } + if in.KVM != nil { + in, out := &in.KVM, &out.KVM + *out = new(KVMTimer) + (*in).DeepCopyInto(*out) + } + if in.PIT != nil { + in, out := &in.PIT, &out.PIT + *out = new(PITTimer) + (*in).DeepCopyInto(*out) + } + if in.RTC != nil { + in, out := &in.RTC, &out.RTC + *out = new(RTCTimer) + (*in).DeepCopyInto(*out) + } + if in.Hyperv != nil { + in, out := &in.Hyperv, &out.Hyperv + *out = new(HypervTimer) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Timer. +func (in *Timer) DeepCopy() *Timer { + if in == nil { + return nil + } + out := new(Timer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenBucketRateLimiter) DeepCopyInto(out *TokenBucketRateLimiter) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenBucketRateLimiter. +func (in *TokenBucketRateLimiter) DeepCopy() *TokenBucketRateLimiter { + if in == nil { + return nil + } + out := new(TokenBucketRateLimiter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TopologyHints) DeepCopyInto(out *TopologyHints) { + *out = *in + if in.TSCFrequency != nil { + in, out := &in.TSCFrequency, &out.TSCFrequency + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologyHints. +func (in *TopologyHints) DeepCopy() *TopologyHints { + if in == nil { + return nil + } + out := new(TopologyHints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnpauseOptions) DeepCopyInto(out *UnpauseOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnpauseOptions. +func (in *UnpauseOptions) DeepCopy() *UnpauseOptions { + if in == nil { + return nil + } + out := new(UnpauseOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserPasswordAccessCredential) DeepCopyInto(out *UserPasswordAccessCredential) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + in.PropagationMethod.DeepCopyInto(&out.PropagationMethod) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPasswordAccessCredential. +func (in *UserPasswordAccessCredential) DeepCopy() *UserPasswordAccessCredential { + if in == nil { + return nil + } + out := new(UserPasswordAccessCredential) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserPasswordAccessCredentialPropagationMethod) DeepCopyInto(out *UserPasswordAccessCredentialPropagationMethod) { + *out = *in + if in.QemuGuestAgent != nil { + in, out := &in.QemuGuestAgent, &out.QemuGuestAgent + *out = new(QemuGuestAgentUserPasswordAccessCredentialPropagation) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPasswordAccessCredentialPropagationMethod. +func (in *UserPasswordAccessCredentialPropagationMethod) DeepCopy() *UserPasswordAccessCredentialPropagationMethod { + if in == nil { + return nil + } + out := new(UserPasswordAccessCredentialPropagationMethod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserPasswordAccessCredentialSource) DeepCopyInto(out *UserPasswordAccessCredentialSource) { + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(AccessCredentialSecretSource) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPasswordAccessCredentialSource. +func (in *UserPasswordAccessCredentialSource) DeepCopy() *UserPasswordAccessCredentialSource { + if in == nil { + return nil + } + out := new(UserPasswordAccessCredentialSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VGPUDisplayOptions) DeepCopyInto(out *VGPUDisplayOptions) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + if in.RamFB != nil { + in, out := &in.RamFB, &out.RamFB + *out = new(FeatureState) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VGPUDisplayOptions. +func (in *VGPUDisplayOptions) DeepCopy() *VGPUDisplayOptions { + if in == nil { + return nil + } + out := new(VGPUDisplayOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VGPUOptions) DeepCopyInto(out *VGPUOptions) { + *out = *in + if in.Display != nil { + in, out := &in.Display, &out.Display + *out = new(VGPUDisplayOptions) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VGPUOptions. +func (in *VGPUOptions) DeepCopy() *VGPUOptions { + if in == nil { + return nil + } + out := new(VGPUOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VMISelector) DeepCopyInto(out *VMISelector) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMISelector. +func (in *VMISelector) DeepCopy() *VMISelector { + if in == nil { + return nil + } + out := new(VMISelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSOCKOptions) DeepCopyInto(out *VSOCKOptions) { + *out = *in + if in.UseTLS != nil { + in, out := &in.UseTLS, &out.UseTLS + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSOCKOptions. +func (in *VSOCKOptions) DeepCopy() *VSOCKOptions { + if in == nil { + return nil + } + out := new(VSOCKOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachine) DeepCopyInto(out *VirtualMachine) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachine. +func (in *VirtualMachine) DeepCopy() *VirtualMachine { + if in == nil { + return nil + } + out := new(VirtualMachine) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualMachine) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineCondition) DeepCopyInto(out *VirtualMachineCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineCondition. 
+func (in *VirtualMachineCondition) DeepCopy() *VirtualMachineCondition { + if in == nil { + return nil + } + out := new(VirtualMachineCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineInstance) DeepCopyInto(out *VirtualMachineInstance) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstance. +func (in *VirtualMachineInstance) DeepCopy() *VirtualMachineInstance { + if in == nil { + return nil + } + out := new(VirtualMachineInstance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualMachineInstance) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineInstanceCondition) DeepCopyInto(out *VirtualMachineInstanceCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceCondition. +func (in *VirtualMachineInstanceCondition) DeepCopy() *VirtualMachineInstanceCondition { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineInstanceFileSystem) DeepCopyInto(out *VirtualMachineInstanceFileSystem) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceFileSystem. +func (in *VirtualMachineInstanceFileSystem) DeepCopy() *VirtualMachineInstanceFileSystem { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceFileSystem) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineInstanceFileSystemInfo) DeepCopyInto(out *VirtualMachineInstanceFileSystemInfo) { + *out = *in + if in.Filesystems != nil { + in, out := &in.Filesystems, &out.Filesystems + *out = make([]VirtualMachineInstanceFileSystem, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceFileSystemInfo. +func (in *VirtualMachineInstanceFileSystemInfo) DeepCopy() *VirtualMachineInstanceFileSystemInfo { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceFileSystemInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
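Every pointer field in this generated file is copied with the same idiom: shallow-copy the whole struct, then re-point each non-nil pointer field at a fresh allocation, shadowing in and out with the field addresses so each branch stays local. A minimal hand-written sketch of that idiom, using hypothetical types rather than the KubeVirt ones:

package main

import "fmt"

// Hypothetical stand-ins for any pair of structs with a pointer field.
type inner struct{ value int }
type outer struct{ ptr *inner }

// deepCopyInto mirrors the generated idiom: shallow-copy first, then break
// the aliasing of every pointer field with a fresh allocation.
func (in *outer) deepCopyInto(out *outer) {
    *out = *in // shallow copy; out.ptr still aliases in.ptr
    if in.ptr != nil {
        in, out := &in.ptr, &out.ptr // shadow with the field addresses
        *out = new(inner)
        **out = **in // copy the pointed-to value, breaking the alias
    }
}

func main() {
    src := outer{ptr: &inner{value: 1}}
    dst := outer{}
    src.deepCopyInto(&dst)
    dst.ptr.value = 2
    fmt.Println(src.ptr.value, dst.ptr.value) // 1 2: no shared state
}

The shadowing keeps each field's copy logic self-contained, which is what lets the generator emit one independent if-block per field.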
+func (in *VirtualMachineInstanceFileSystemList) DeepCopyInto(out *VirtualMachineInstanceFileSystemList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VirtualMachineInstanceFileSystem, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceFileSystemList. +func (in *VirtualMachineInstanceFileSystemList) DeepCopy() *VirtualMachineInstanceFileSystemList { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceFileSystemList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualMachineInstanceFileSystemList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineInstanceGuestAgentInfo) DeepCopyInto(out *VirtualMachineInstanceGuestAgentInfo) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.SupportedCommands != nil { + in, out := &in.SupportedCommands, &out.SupportedCommands + *out = make([]GuestAgentCommandInfo, len(*in)) + copy(*out, *in) + } + out.OS = in.OS + if in.UserList != nil { + in, out := &in.UserList, &out.UserList + *out = make([]VirtualMachineInstanceGuestOSUser, len(*in)) + copy(*out, *in) + } + in.FSInfo.DeepCopyInto(&out.FSInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceGuestAgentInfo. +func (in *VirtualMachineInstanceGuestAgentInfo) DeepCopy() *VirtualMachineInstanceGuestAgentInfo { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceGuestAgentInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualMachineInstanceGuestAgentInfo) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineInstanceGuestOSInfo) DeepCopyInto(out *VirtualMachineInstanceGuestOSInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceGuestOSInfo. +func (in *VirtualMachineInstanceGuestOSInfo) DeepCopy() *VirtualMachineInstanceGuestOSInfo { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceGuestOSInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineInstanceGuestOSUser) DeepCopyInto(out *VirtualMachineInstanceGuestOSUser) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceGuestOSUser. +func (in *VirtualMachineInstanceGuestOSUser) DeepCopy() *VirtualMachineInstanceGuestOSUser { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceGuestOSUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
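The DeepCopyObject wrappers are what let these types satisfy runtime.Object, so generic machinery (informers, caches) can take a private copy without knowing the concrete type. A sketch, assuming the vendored kubevirt.io/api/core/v1 and k8s.io/apimachinery module paths; the object name is illustrative only:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/runtime"
    v1 "kubevirt.io/api/core/v1"
)

// snapshot takes a private copy through the runtime.Object interface alone,
// which is all that generic cache code gets to see.
func snapshot(obj runtime.Object) runtime.Object {
    return obj.DeepCopyObject()
}

func main() {
    vmi := &v1.VirtualMachineInstance{}
    vmi.Name = "demo" // hypothetical object name
    cp := snapshot(vmi).(*v1.VirtualMachineInstance)
    cp.Name = "changed"
    fmt.Println(vmi.Name, cp.Name) // demo changed
}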
+func (in *VirtualMachineInstanceGuestOSUserList) DeepCopyInto(out *VirtualMachineInstanceGuestOSUserList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VirtualMachineInstanceGuestOSUser, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceGuestOSUserList. +func (in *VirtualMachineInstanceGuestOSUserList) DeepCopy() *VirtualMachineInstanceGuestOSUserList { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceGuestOSUserList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualMachineInstanceGuestOSUserList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineInstanceList) DeepCopyInto(out *VirtualMachineInstanceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VirtualMachineInstance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceList. +func (in *VirtualMachineInstanceList) DeepCopy() *VirtualMachineInstanceList { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualMachineInstanceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineInstanceMigration) DeepCopyInto(out *VirtualMachineInstanceMigration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceMigration. +func (in *VirtualMachineInstanceMigration) DeepCopy() *VirtualMachineInstanceMigration { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceMigration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualMachineInstanceMigration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineInstanceMigrationCondition) DeepCopyInto(out *VirtualMachineInstanceMigrationCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceMigrationCondition. 
+func (in *VirtualMachineInstanceMigrationCondition) DeepCopy() *VirtualMachineInstanceMigrationCondition { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceMigrationCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineInstanceMigrationList) DeepCopyInto(out *VirtualMachineInstanceMigrationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VirtualMachineInstanceMigration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceMigrationList. +func (in *VirtualMachineInstanceMigrationList) DeepCopy() *VirtualMachineInstanceMigrationList { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceMigrationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualMachineInstanceMigrationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineInstanceMigrationPhaseTransitionTimestamp) DeepCopyInto(out *VirtualMachineInstanceMigrationPhaseTransitionTimestamp) { + *out = *in + in.PhaseTransitionTimestamp.DeepCopyInto(&out.PhaseTransitionTimestamp) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceMigrationPhaseTransitionTimestamp. +func (in *VirtualMachineInstanceMigrationPhaseTransitionTimestamp) DeepCopy() *VirtualMachineInstanceMigrationPhaseTransitionTimestamp { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceMigrationPhaseTransitionTimestamp) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineInstanceMigrationSpec) DeepCopyInto(out *VirtualMachineInstanceMigrationSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceMigrationSpec. +func (in *VirtualMachineInstanceMigrationSpec) DeepCopy() *VirtualMachineInstanceMigrationSpec { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceMigrationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
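List items are copied two ways above: slices of plain values go through copy(), while element types that contain pointers get an element-wise DeepCopyInto loop. A sketch of why the distinction matters, with a hypothetical element type:

package main

import "fmt"

// item is a hypothetical element type with a pointer, standing in for
// structs like VirtualMachineInstanceMigration above.
type item struct{ p *int }

func main() {
    n := 1
    in := []item{{p: &n}}

    // copy() is enough for pointer-free element types ([]string, []int).
    shallow := make([]item, len(in))
    copy(shallow, in) // elements still share the same *int

    // Pointer-bearing element types need the element-wise deep copy,
    // matching the for-range loops in the generated code.
    deep := make([]item, len(in))
    for i := range in {
        v := *in[i].p
        deep[i] = item{p: &v}
    }

    *in[0].p = 2
    fmt.Println(*shallow[0].p, *deep[0].p) // 2 1
}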
+func (in *VirtualMachineInstanceMigrationState) DeepCopyInto(out *VirtualMachineInstanceMigrationState) { + *out = *in + if in.StartTimestamp != nil { + in, out := &in.StartTimestamp, &out.StartTimestamp + *out = (*in).DeepCopy() + } + if in.EndTimestamp != nil { + in, out := &in.EndTimestamp, &out.EndTimestamp + *out = (*in).DeepCopy() + } + if in.TargetDirectMigrationNodePorts != nil { + in, out := &in.TargetDirectMigrationNodePorts, &out.TargetDirectMigrationNodePorts + *out = make(map[string]int, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.MigrationPolicyName != nil { + in, out := &in.MigrationPolicyName, &out.MigrationPolicyName + *out = new(string) + **out = **in + } + if in.MigrationConfiguration != nil { + in, out := &in.MigrationConfiguration, &out.MigrationConfiguration + *out = new(MigrationConfiguration) + (*in).DeepCopyInto(*out) + } + if in.TargetCPUSet != nil { + in, out := &in.TargetCPUSet, &out.TargetCPUSet + *out = make([]int, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceMigrationState. +func (in *VirtualMachineInstanceMigrationState) DeepCopy() *VirtualMachineInstanceMigrationState { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceMigrationState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineInstanceMigrationStatus) DeepCopyInto(out *VirtualMachineInstanceMigrationStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]VirtualMachineInstanceMigrationCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PhaseTransitionTimestamps != nil { + in, out := &in.PhaseTransitionTimestamps, &out.PhaseTransitionTimestamps + *out = make([]VirtualMachineInstanceMigrationPhaseTransitionTimestamp, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MigrationState != nil { + in, out := &in.MigrationState, &out.MigrationState + *out = new(VirtualMachineInstanceMigrationState) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceMigrationStatus. +func (in *VirtualMachineInstanceMigrationStatus) DeepCopy() *VirtualMachineInstanceMigrationStatus { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceMigrationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineInstanceNetworkInterface) DeepCopyInto(out *VirtualMachineInstanceNetworkInterface) { + *out = *in + if in.IPs != nil { + in, out := &in.IPs, &out.IPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceNetworkInterface. +func (in *VirtualMachineInstanceNetworkInterface) DeepCopy() *VirtualMachineInstanceNetworkInterface { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceNetworkInterface) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
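Maps such as TargetDirectMigrationNodePorts are copied entry by entry because assigning a map copies only the header, leaving both sides sharing storage. A sketch with hypothetical port values:

package main

import "fmt"

func main() {
    // Entry-by-entry copy, as in the generated code above.
    in := map[string]int{"49152": 0, "49153": 1} // hypothetical ports
    out := make(map[string]int, len(in))
    for key, val := range in {
        out[key] = val
    }
    out["49153"] = 99
    fmt.Println(in["49153"], out["49153"]) // 1 99
}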
+func (in *VirtualMachineInstancePhaseTransitionTimestamp) DeepCopyInto(out *VirtualMachineInstancePhaseTransitionTimestamp) { + *out = *in + in.PhaseTransitionTimestamp.DeepCopyInto(&out.PhaseTransitionTimestamp) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstancePhaseTransitionTimestamp. +func (in *VirtualMachineInstancePhaseTransitionTimestamp) DeepCopy() *VirtualMachineInstancePhaseTransitionTimestamp { + if in == nil { + return nil + } + out := new(VirtualMachineInstancePhaseTransitionTimestamp) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineInstancePreset) DeepCopyInto(out *VirtualMachineInstancePreset) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstancePreset. +func (in *VirtualMachineInstancePreset) DeepCopy() *VirtualMachineInstancePreset { + if in == nil { + return nil + } + out := new(VirtualMachineInstancePreset) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualMachineInstancePreset) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineInstancePresetList) DeepCopyInto(out *VirtualMachineInstancePresetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VirtualMachineInstancePreset, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstancePresetList. +func (in *VirtualMachineInstancePresetList) DeepCopy() *VirtualMachineInstancePresetList { + if in == nil { + return nil + } + out := new(VirtualMachineInstancePresetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualMachineInstancePresetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineInstancePresetSpec) DeepCopyInto(out *VirtualMachineInstancePresetSpec) { + *out = *in + in.Selector.DeepCopyInto(&out.Selector) + if in.Domain != nil { + in, out := &in.Domain, &out.Domain + *out = new(DomainSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstancePresetSpec. +func (in *VirtualMachineInstancePresetSpec) DeepCopy() *VirtualMachineInstancePresetSpec { + if in == nil { + return nil + } + out := new(VirtualMachineInstancePresetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
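VirtualMachineInstancePresetSpec carries a metav1.LabelSelector, which is how a preset picks the VMIs it applies to. A sketch of evaluating such a selector, assuming a hypothetical label key:

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
)

func main() {
    // The label key/value are illustrative only.
    sel := &metav1.LabelSelector{
        MatchLabels: map[string]string{"kubevirt.io/size": "small"},
    }
    s, err := metav1.LabelSelectorAsSelector(sel)
    if err != nil {
        panic(err)
    }
    fmt.Println(s.Matches(labels.Set{"kubevirt.io/size": "small"})) // true
    fmt.Println(s.Matches(labels.Set{"kubevirt.io/size": "large"})) // false
}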
+func (in *VirtualMachineInstanceProfile) DeepCopyInto(out *VirtualMachineInstanceProfile) { + *out = *in + if in.CustomProfile != nil { + in, out := &in.CustomProfile, &out.CustomProfile + *out = new(CustomProfile) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceProfile. +func (in *VirtualMachineInstanceProfile) DeepCopy() *VirtualMachineInstanceProfile { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineInstanceReplicaSet) DeepCopyInto(out *VirtualMachineInstanceReplicaSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceReplicaSet. +func (in *VirtualMachineInstanceReplicaSet) DeepCopy() *VirtualMachineInstanceReplicaSet { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceReplicaSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualMachineInstanceReplicaSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineInstanceReplicaSetCondition) DeepCopyInto(out *VirtualMachineInstanceReplicaSetCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceReplicaSetCondition. +func (in *VirtualMachineInstanceReplicaSetCondition) DeepCopy() *VirtualMachineInstanceReplicaSetCondition { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceReplicaSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineInstanceReplicaSetList) DeepCopyInto(out *VirtualMachineInstanceReplicaSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VirtualMachineInstanceReplicaSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceReplicaSetList. +func (in *VirtualMachineInstanceReplicaSetList) DeepCopy() *VirtualMachineInstanceReplicaSetList { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceReplicaSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VirtualMachineInstanceReplicaSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *VirtualMachineInstanceReplicaSetSpec) DeepCopyInto(out *VirtualMachineInstanceReplicaSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(VirtualMachineInstanceTemplateSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceReplicaSetSpec. +func (in *VirtualMachineInstanceReplicaSetSpec) DeepCopy() *VirtualMachineInstanceReplicaSetSpec { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceReplicaSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineInstanceReplicaSetStatus) DeepCopyInto(out *VirtualMachineInstanceReplicaSetStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]VirtualMachineInstanceReplicaSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceReplicaSetStatus. +func (in *VirtualMachineInstanceReplicaSetStatus) DeepCopy() *VirtualMachineInstanceReplicaSetStatus { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceReplicaSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VirtualMachineInstanceSpec) DeepCopyInto(out *VirtualMachineInstanceSpec) { + *out = *in + in.Domain.DeepCopyInto(&out.Domain) + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(corev1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]corev1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EvictionStrategy != nil { + in, out := &in.EvictionStrategy, &out.EvictionStrategy + *out = new(EvictionStrategy) + **out = **in + } + if in.StartStrategy != nil { + in, out := &in.StartStrategy, &out.StartStrategy + *out = new(StartStrategy) + **out = **in + } + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.Networks != nil { + in, out := &in.Networks, &out.Networks + *out = make([]Network, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + *out = new(corev1.PodDNSConfig) + (*in).DeepCopyInto(*out) + } + if in.AccessCredentials != nil { + in, out := &in.AccessCredentials, &out.AccessCredentials + *out = make([]AccessCredential, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceSpec. +func (in *VirtualMachineInstanceSpec) DeepCopy() *VirtualMachineInstanceSpec { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
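VirtualMachineInstanceSpec is the payload that gets stamped from a VirtualMachine's template into each VMI, which is why it needs a full deep copy: mutations of the new VMI must not leak back into the template. A sketch under that assumption; stampVMI and the names are hypothetical:

package main

import (
    "fmt"

    v1 "kubevirt.io/api/core/v1"
)

// stampVMI copies the template spec before handing it to a new VMI, so the
// two objects share no state afterwards.
func stampVMI(vm *v1.VirtualMachine, name string) *v1.VirtualMachineInstance {
    vmi := &v1.VirtualMachineInstance{}
    vmi.Name = name
    if vm.Spec.Template != nil {
        vmi.Spec = *vm.Spec.Template.Spec.DeepCopy()
    }
    return vmi
}

func main() {
    vm := &v1.VirtualMachine{}
    vm.Spec.Template = &v1.VirtualMachineInstanceTemplateSpec{}
    vm.Spec.Template.Spec.Hostname = "template"
    vmi := stampVMI(vm, "vmi-a")
    vmi.Spec.Hostname = "vmi-a"
    fmt.Println(vm.Spec.Template.Spec.Hostname, vmi.Spec.Hostname) // template vmi-a
}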
+func (in *VirtualMachineInstanceStatus) DeepCopyInto(out *VirtualMachineInstanceStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]VirtualMachineInstanceCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PhaseTransitionTimestamps != nil { + in, out := &in.PhaseTransitionTimestamps, &out.PhaseTransitionTimestamps + *out = make([]VirtualMachineInstancePhaseTransitionTimestamp, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Interfaces != nil { + in, out := &in.Interfaces, &out.Interfaces + *out = make([]VirtualMachineInstanceNetworkInterface, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.GuestOSInfo = in.GuestOSInfo + if in.MigrationState != nil { + in, out := &in.MigrationState, &out.MigrationState + *out = new(VirtualMachineInstanceMigrationState) + (*in).DeepCopyInto(*out) + } + if in.QOSClass != nil { + in, out := &in.QOSClass, &out.QOSClass + *out = new(corev1.PodQOSClass) + **out = **in + } + if in.ActivePods != nil { + in, out := &in.ActivePods, &out.ActivePods + *out = make(map[types.UID]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.VolumeStatus != nil { + in, out := &in.VolumeStatus, &out.VolumeStatus + *out = make([]VolumeStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopologyHints != nil { + in, out := &in.TopologyHints, &out.TopologyHints + *out = new(TopologyHints) + (*in).DeepCopyInto(*out) + } + if in.VSOCKCID != nil { + in, out := &in.VSOCKCID, &out.VSOCKCID + *out = new(uint32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceStatus. +func (in *VirtualMachineInstanceStatus) DeepCopy() *VirtualMachineInstanceStatus { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineInstanceTemplateSpec) DeepCopyInto(out *VirtualMachineInstanceTemplateSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineInstanceTemplateSpec. +func (in *VirtualMachineInstanceTemplateSpec) DeepCopy() *VirtualMachineInstanceTemplateSpec { + if in == nil { + return nil + } + out := new(VirtualMachineInstanceTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineList) DeepCopyInto(out *VirtualMachineList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VirtualMachine, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineList. +func (in *VirtualMachineList) DeepCopy() *VirtualMachineList { + if in == nil { + return nil + } + out := new(VirtualMachineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *VirtualMachineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineMemoryDumpRequest) DeepCopyInto(out *VirtualMachineMemoryDumpRequest) { + *out = *in + if in.StartTimestamp != nil { + in, out := &in.StartTimestamp, &out.StartTimestamp + *out = (*in).DeepCopy() + } + if in.EndTimestamp != nil { + in, out := &in.EndTimestamp, &out.EndTimestamp + *out = (*in).DeepCopy() + } + if in.FileName != nil { + in, out := &in.FileName, &out.FileName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineMemoryDumpRequest. +func (in *VirtualMachineMemoryDumpRequest) DeepCopy() *VirtualMachineMemoryDumpRequest { + if in == nil { + return nil + } + out := new(VirtualMachineMemoryDumpRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineSpec) DeepCopyInto(out *VirtualMachineSpec) { + *out = *in + if in.Running != nil { + in, out := &in.Running, &out.Running + *out = new(bool) + **out = **in + } + if in.RunStrategy != nil { + in, out := &in.RunStrategy, &out.RunStrategy + *out = new(VirtualMachineRunStrategy) + **out = **in + } + if in.Instancetype != nil { + in, out := &in.Instancetype, &out.Instancetype + *out = new(InstancetypeMatcher) + **out = **in + } + if in.Preference != nil { + in, out := &in.Preference, &out.Preference + *out = new(PreferenceMatcher) + **out = **in + } + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(VirtualMachineInstanceTemplateSpec) + (*in).DeepCopyInto(*out) + } + if in.DataVolumeTemplates != nil { + in, out := &in.DataVolumeTemplates, &out.DataVolumeTemplates + *out = make([]DataVolumeTemplateSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineSpec. +func (in *VirtualMachineSpec) DeepCopy() *VirtualMachineSpec { + if in == nil { + return nil + } + out := new(VirtualMachineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineStartFailure) DeepCopyInto(out *VirtualMachineStartFailure) { + *out = *in + if in.RetryAfterTimestamp != nil { + in, out := &in.RetryAfterTimestamp, &out.RetryAfterTimestamp + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineStartFailure. +func (in *VirtualMachineStartFailure) DeepCopy() *VirtualMachineStartFailure { + if in == nil { + return nil + } + out := new(VirtualMachineStartFailure) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
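Running and RunStrategy on VirtualMachineSpec are alternative pointer fields (KubeVirt treats them as mutually exclusive ways to express the desired state), and the generated copy allocates fresh pointers for each copy. A sketch:

package main

import (
    "fmt"

    v1 "kubevirt.io/api/core/v1"
)

func main() {
    // Fresh pointers mean a copied spec can switch strategy without
    // touching the original.
    always := v1.RunStrategyAlways
    vm := &v1.VirtualMachine{}
    vm.Spec.RunStrategy = &always

    cp := vm.Spec.DeepCopy()
    halted := v1.RunStrategyHalted
    cp.RunStrategy = &halted
    fmt.Println(*vm.Spec.RunStrategy, *cp.RunStrategy) // Always Halted
}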
+func (in *VirtualMachineStateChangeRequest) DeepCopyInto(out *VirtualMachineStateChangeRequest) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(types.UID) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineStateChangeRequest. +func (in *VirtualMachineStateChangeRequest) DeepCopy() *VirtualMachineStateChangeRequest { + if in == nil { + return nil + } + out := new(VirtualMachineStateChangeRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineStatus) DeepCopyInto(out *VirtualMachineStatus) { + *out = *in + if in.SnapshotInProgress != nil { + in, out := &in.SnapshotInProgress, &out.SnapshotInProgress + *out = new(string) + **out = **in + } + if in.RestoreInProgress != nil { + in, out := &in.RestoreInProgress, &out.RestoreInProgress + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]VirtualMachineCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StateChangeRequests != nil { + in, out := &in.StateChangeRequests, &out.StateChangeRequests + *out = make([]VirtualMachineStateChangeRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeRequests != nil { + in, out := &in.VolumeRequests, &out.VolumeRequests + *out = make([]VirtualMachineVolumeRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeSnapshotStatuses != nil { + in, out := &in.VolumeSnapshotStatuses, &out.VolumeSnapshotStatuses + *out = make([]VolumeSnapshotStatus, len(*in)) + copy(*out, *in) + } + if in.StartFailure != nil { + in, out := &in.StartFailure, &out.StartFailure + *out = new(VirtualMachineStartFailure) + (*in).DeepCopyInto(*out) + } + if in.MemoryDumpRequest != nil { + in, out := &in.MemoryDumpRequest, &out.MemoryDumpRequest + *out = new(VirtualMachineMemoryDumpRequest) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineStatus. +func (in *VirtualMachineStatus) DeepCopy() *VirtualMachineStatus { + if in == nil { + return nil + } + out := new(VirtualMachineStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VirtualMachineVolumeRequest) DeepCopyInto(out *VirtualMachineVolumeRequest) { + *out = *in + if in.AddVolumeOptions != nil { + in, out := &in.AddVolumeOptions, &out.AddVolumeOptions + *out = new(AddVolumeOptions) + (*in).DeepCopyInto(*out) + } + if in.RemoveVolumeOptions != nil { + in, out := &in.RemoveVolumeOptions, &out.RemoveVolumeOptions + *out = new(RemoveVolumeOptions) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualMachineVolumeRequest. 
+func (in *VirtualMachineVolumeRequest) DeepCopy() *VirtualMachineVolumeRequest { + if in == nil { + return nil + } + out := new(VirtualMachineVolumeRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Volume) DeepCopyInto(out *Volume) { + *out = *in + in.VolumeSource.DeepCopyInto(&out.VolumeSource) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. +func (in *Volume) DeepCopy() *Volume { + if in == nil { + return nil + } + out := new(Volume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshotStatus) DeepCopyInto(out *VolumeSnapshotStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotStatus. +func (in *VolumeSnapshotStatus) DeepCopy() *VolumeSnapshotStatus { + if in == nil { + return nil + } + out := new(VolumeSnapshotStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { + *out = *in + if in.HostDisk != nil { + in, out := &in.HostDisk, &out.HostDisk + *out = new(HostDisk) + (*in).DeepCopyInto(*out) + } + if in.PersistentVolumeClaim != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + *out = new(PersistentVolumeClaimVolumeSource) + **out = **in + } + if in.CloudInitNoCloud != nil { + in, out := &in.CloudInitNoCloud, &out.CloudInitNoCloud + *out = new(CloudInitNoCloudSource) + (*in).DeepCopyInto(*out) + } + if in.CloudInitConfigDrive != nil { + in, out := &in.CloudInitConfigDrive, &out.CloudInitConfigDrive + *out = new(CloudInitConfigDriveSource) + (*in).DeepCopyInto(*out) + } + if in.Sysprep != nil { + in, out := &in.Sysprep, &out.Sysprep + *out = new(SysprepSource) + (*in).DeepCopyInto(*out) + } + if in.ContainerDisk != nil { + in, out := &in.ContainerDisk, &out.ContainerDisk + *out = new(ContainerDiskSource) + **out = **in + } + if in.Ephemeral != nil { + in, out := &in.Ephemeral, &out.Ephemeral + *out = new(EphemeralVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.EmptyDisk != nil { + in, out := &in.EmptyDisk, &out.EmptyDisk + *out = new(EmptyDiskSource) + (*in).DeepCopyInto(*out) + } + if in.DataVolume != nil { + in, out := &in.DataVolume, &out.DataVolume + *out = new(DataVolumeSource) + **out = **in + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccount != nil { + in, out := &in.ServiceAccount, &out.ServiceAccount + *out = new(ServiceAccountVolumeSource) + **out = **in + } + if in.DownwardMetrics != nil { + in, out := &in.DownwardMetrics, &out.DownwardMetrics + *out = new(DownwardMetricsVolumeSource) + **out = **in + } + if in.MemoryDump != nil { + in, out := &in.MemoryDump, &out.MemoryDump + *out = new(MemoryDumpVolumeSource) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the 
receiver, creating a new VolumeSource. +func (in *VolumeSource) DeepCopy() *VolumeSource { + if in == nil { + return nil + } + out := new(VolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeStatus) DeepCopyInto(out *VolumeStatus) { + *out = *in + if in.PersistentVolumeClaimInfo != nil { + in, out := &in.PersistentVolumeClaimInfo, &out.PersistentVolumeClaimInfo + *out = new(PersistentVolumeClaimInfo) + (*in).DeepCopyInto(*out) + } + if in.HotplugVolume != nil { + in, out := &in.HotplugVolume, &out.HotplugVolume + *out = new(HotplugVolumeStatus) + **out = **in + } + if in.MemoryDumpVolume != nil { + in, out := &in.MemoryDumpVolume, &out.MemoryDumpVolume + *out = new(DomainMemoryDumpInfo) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeStatus. +func (in *VolumeStatus) DeepCopy() *VolumeStatus { + if in == nil { + return nil + } + out := new(VolumeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Watchdog) DeepCopyInto(out *Watchdog) { + *out = *in + in.WatchdogDevice.DeepCopyInto(&out.WatchdogDevice) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Watchdog. +func (in *Watchdog) DeepCopy() *Watchdog { + if in == nil { + return nil + } + out := new(Watchdog) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WatchdogDevice) DeepCopyInto(out *WatchdogDevice) { + *out = *in + if in.I6300ESB != nil { + in, out := &in.I6300ESB, &out.I6300ESB + *out = new(I6300ESBWatchdog) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatchdogDevice. 
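VolumeSource above is a one-of struct: a Volume sets exactly one member, and the generated copy descends only into the branch that is non-nil. A sketch using ContainerDisk; the names and image are illustrative only:

package main

import (
    "fmt"

    v1 "kubevirt.io/api/core/v1"
)

func main() {
    vol := v1.Volume{
        Name: "rootdisk", // hypothetical volume name
        VolumeSource: v1.VolumeSource{
            ContainerDisk: &v1.ContainerDiskSource{
                Image: "registry.example/containerdisk:latest", // hypothetical image
            },
        },
    }
    cp := vol.DeepCopy()
    cp.ContainerDisk.Image = "changed"
    fmt.Println(vol.ContainerDisk.Image == cp.ContainerDisk.Image) // false
}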
+func (in *WatchdogDevice) DeepCopy() *WatchdogDevice { + if in == nil { + return nil + } + out := new(WatchdogDevice) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/kubevirt.io/api/core/v1/defaults.go b/vendor/kubevirt.io/api/core/v1/defaults.go new file mode 100644 index 000000000..c87451c61 --- /dev/null +++ b/vendor/kubevirt.io/api/core/v1/defaults.go @@ -0,0 +1,240 @@ +package v1 + +import ( + "github.com/pborman/uuid" + "k8s.io/apimachinery/pkg/types" +) + +var _true = t(true) +var _false = t(false) + +func SetDefaults_HPETTimer(obj *HPETTimer) { + if obj.Enabled == nil { + obj.Enabled = _true + } +} + +func SetDefaults_PITTimer(obj *PITTimer) { + if obj.Enabled == nil { + obj.Enabled = _true + } +} + +func SetDefaults_KVMTimer(obj *KVMTimer) { + if obj.Enabled == nil { + obj.Enabled = _true + } +} + +func SetDefaults_HypervTimer(obj *HypervTimer) { + if obj.Enabled == nil { + obj.Enabled = _true + } +} + +func SetDefaults_RTCTimer(obj *RTCTimer) { + if obj.Enabled == nil { + obj.Enabled = _true + } +} + +func SetDefaults_FeatureState(obj *FeatureState) { + if obj.Enabled == nil { + obj.Enabled = _true + } +} + +func SetDefaults_SyNICTimer(obj *SyNICTimer) { + if obj.Enabled == nil { + obj.Enabled = _true + } + + if obj.Direct != nil && obj.Direct.Enabled == nil { + obj.Direct.Enabled = _true + } +} + +func SetDefaults_FeatureAPIC(obj *FeatureAPIC) { + if obj.Enabled == nil { + obj.Enabled = _true + } +} + +func SetDefaults_FeatureVendorID(obj *FeatureVendorID) { + if obj.Enabled == nil { + obj.Enabled = _true + } +} + +func SetDefaults_DiskDevice(obj *DiskDevice) { + if obj.Disk == nil && + obj.CDRom == nil && + obj.LUN == nil { + obj.Disk = &DiskTarget{} + } +} + +func SetDefaults_Watchdog(obj *Watchdog) { + if obj.I6300ESB == nil { + obj.I6300ESB = &I6300ESBWatchdog{} + } +} + +func SetDefaults_CDRomTarget(obj *CDRomTarget) { + if obj.ReadOnly == nil { + obj.ReadOnly = _true + } + if obj.Tray == "" { + obj.Tray = TrayStateClosed + } +} + +func SetDefaults_FeatureSpinlocks(obj *FeatureSpinlocks) { + if obj.Enabled == nil { + obj.Enabled = _true + } + if *obj.Enabled == *_true && obj.Retries == nil { + obj.Retries = ui32(4096) + } +} + +func SetDefaults_I6300ESBWatchdog(obj *I6300ESBWatchdog) { + if obj.Action == "" { + obj.Action = WatchdogActionReset + } +} + +func SetDefaults_Firmware(obj *Firmware) { + if obj.UUID == "" { + obj.UUID = types.UID(uuid.NewRandom().String()) + } +} + +func SetDefaults_VirtualMachineInstance(obj *VirtualMachineInstance) { + if obj.Spec.Domain.Firmware == nil { + obj.Spec.Domain.Firmware = &Firmware{} + } + + if obj.Spec.Domain.Features == nil { + obj.Spec.Domain.Features = &Features{} + } + + setDefaults_Disk(obj) + setDefaults_Input(obj) + SetDefaults_Probe(obj.Spec.ReadinessProbe) + SetDefaults_Probe(obj.Spec.LivenessProbe) +} + +func setDefaults_Disk(obj *VirtualMachineInstance) { + for i := range obj.Spec.Domain.Devices.Disks { + disk := &obj.Spec.Domain.Devices.Disks[i].DiskDevice + SetDefaults_DiskDevice(disk) + } +} + +func setDefaults_Input(obj *VirtualMachineInstance) { + for i := range obj.Spec.Domain.Devices.Inputs { + input := &obj.Spec.Domain.Devices.Inputs[i] + + if input.Bus == "" { + input.Bus = InputBusUSB + } + + if input.Type == "" { + input.Type = InputTypeTablet + } + } +} + +func SetDefaults_Probe(probe *Probe) { + if probe == nil { + return + } + + if probe.TimeoutSeconds < 1 { + probe.TimeoutSeconds = 1 + } + + if probe.PeriodSeconds < 1 { + probe.PeriodSeconds = 10 + } + + if probe.SuccessThreshold < 1 { + 
probe.SuccessThreshold = 1 + } + + if probe.FailureThreshold < 1 { + probe.FailureThreshold = 3 + } +} + +func SetDefaults_NetworkInterface(obj *VirtualMachineInstance) { + autoAttach := obj.Spec.Domain.Devices.AutoattachPodInterface + if autoAttach != nil && *autoAttach == false { + return + } + + // Override only when nothing is specified + if len(obj.Spec.Networks) == 0 { + obj.Spec.Domain.Devices.Interfaces = []Interface{*DefaultBridgeNetworkInterface()} + obj.Spec.Networks = []Network{*DefaultPodNetwork()} + } +} + +func DefaultBridgeNetworkInterface() *Interface { + iface := &Interface{ + Name: "default", + InterfaceBindingMethod: InterfaceBindingMethod{ + Bridge: &InterfaceBridge{}, + }, + } + return iface +} + +func DefaultSlirpNetworkInterface() *Interface { + iface := &Interface{ + Name: "default", + InterfaceBindingMethod: InterfaceBindingMethod{ + Slirp: &InterfaceSlirp{}, + }, + } + return iface +} + +func DefaultMasqueradeNetworkInterface() *Interface { + iface := &Interface{ + Name: "default", + InterfaceBindingMethod: InterfaceBindingMethod{ + Masquerade: &InterfaceMasquerade{}, + }, + } + return iface +} + +func DefaultMacvtapNetworkInterface(ifaceName string) *Interface { + iface := &Interface{ + Name: ifaceName, + InterfaceBindingMethod: InterfaceBindingMethod{ + Macvtap: &InterfaceMacvtap{}, + }, + } + return iface +} + +func DefaultPodNetwork() *Network { + defaultNet := &Network{ + Name: "default", + NetworkSource: NetworkSource{ + Pod: &PodNetwork{}, + }, + } + return defaultNet +} + +func t(v bool) *bool { + return &v +} + +func ui32(v uint32) *uint32 { + return &v +} diff --git a/vendor/kubevirt.io/api/core/v1/doc.go b/vendor/kubevirt.io/api/core/v1/doc.go new file mode 100644 index 000000000..d434314c9 --- /dev/null +++ b/vendor/kubevirt.io/api/core/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package +// +k8s:defaulter-gen=TypeMeta + +// +groupName=kubevirt.io +// +versionName=v1alpha3 +// +k8s:openapi-gen=true +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/kubevirt.io/api/core/v1/register.go b/vendor/kubevirt.io/api/core/v1/register.go new file mode 100644 index 000000000..2bc8efc4f --- /dev/null +++ b/vendor/kubevirt.io/api/core/v1/register.go @@ -0,0 +1,121 @@ +/* + * This file is part of the KubeVirt project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright 2019 Red Hat, Inc. 
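The SetDefaults_* functions in defaults.go above fill only fields still at their zero value, so explicit settings survive defaulting, and SetDefaults_NetworkInterface backfills a bridge interface on the pod network only when no networks are specified at all. A minimal sketch, assuming the vendored module path:

package main

import (
    "fmt"

    v1 "kubevirt.io/api/core/v1"
)

func main() {
    // Only the unset probe fields are defaulted; the explicit timeout stays.
    probe := &v1.Probe{TimeoutSeconds: 5}
    v1.SetDefaults_Probe(probe)
    fmt.Println(probe.TimeoutSeconds, probe.PeriodSeconds,
        probe.SuccessThreshold, probe.FailureThreshold) // 5 10 1 3

    // With no networks specified, the default bridge interface and pod
    // network are attached.
    vmi := &v1.VirtualMachineInstance{}
    v1.SetDefaults_NetworkInterface(vmi)
    fmt.Println(vmi.Spec.Networks[0].Name,
        vmi.Spec.Domain.Devices.Interfaces[0].Name) // default default
}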
+ * + */ +package v1 + +import ( + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "kubevirt.io/api/core" +) + +const SubresourceGroupName = "subresources.kubevirt.io" +const KubeVirtClientGoSchemeRegistrationVersionEnvVar = "KUBEVIRT_CLIENT_GO_SCHEME_REGISTRATION_VERSION" + +var ( + ApiLatestVersion = "v1" + ApiSupportedWebhookVersions = []string{"v1alpha3", "v1"} + ApiStorageVersion = "v1alpha3" + ApiSupportedVersions = []extv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Served: true, + Storage: false, + }, + { + Name: "v1alpha3", + Served: true, + Storage: true, + }, + } +) + +var ( + // GroupVersion is the latest group version for the KubeVirt api + GroupVersion = schema.GroupVersion{Group: core.GroupName, Version: ApiLatestVersion} + SchemeGroupVersion = schema.GroupVersion{Group: core.GroupName, Version: ApiLatestVersion} + + // StorageGroupVersion is the group version our api is persistented internally as + StorageGroupVersion = schema.GroupVersion{Group: core.GroupName, Version: ApiStorageVersion} + + // GroupVersions is group version list used to register these objects + // The preferred group version is the first item in the list. + GroupVersions = []schema.GroupVersion{{Group: core.GroupName, Version: "v1"}, {Group: core.GroupName, Version: "v1alpha3"}} + + // SubresourceGroupVersions is group version list used to register these objects + // The preferred group version is the first item in the list. + SubresourceGroupVersions = []schema.GroupVersion{{Group: SubresourceGroupName, Version: ApiLatestVersion}, {Group: SubresourceGroupName, Version: ApiStorageVersion}} + + // SubresourceStorageGroupVersion is the group version our api is persistented internally as + SubresourceStorageGroupVersion = schema.GroupVersion{Group: SubresourceGroupName, Version: ApiStorageVersion} +) + +var ( + // GroupVersionKind + VirtualMachineInstanceGroupVersionKind = schema.GroupVersionKind{Group: core.GroupName, Version: GroupVersion.Version, Kind: "VirtualMachineInstance"} + VirtualMachineInstanceReplicaSetGroupVersionKind = schema.GroupVersionKind{Group: core.GroupName, Version: GroupVersion.Version, Kind: "VirtualMachineInstanceReplicaSet"} + VirtualMachineInstancePresetGroupVersionKind = schema.GroupVersionKind{Group: core.GroupName, Version: GroupVersion.Version, Kind: "VirtualMachineInstancePreset"} + VirtualMachineGroupVersionKind = schema.GroupVersionKind{Group: core.GroupName, Version: GroupVersion.Version, Kind: "VirtualMachine"} + VirtualMachineInstanceMigrationGroupVersionKind = schema.GroupVersionKind{Group: core.GroupName, Version: GroupVersion.Version, Kind: "VirtualMachineInstanceMigration"} + KubeVirtGroupVersionKind = schema.GroupVersionKind{Group: core.GroupName, Version: GroupVersion.Version, Kind: "KubeVirt"} +) + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(AddKnownTypesGenerator([]schema.GroupVersion{GroupVersion})) + AddToScheme = SchemeBuilder.AddToScheme +) + +func AddKnownTypesGenerator(groupVersions []schema.GroupVersion) func(scheme *runtime.Scheme) error { + + // Adds the list of known types to api.Scheme. 
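A sketch of consuming this registration: AddToScheme installs the KubeVirt kinds into a runtime.Scheme, after which the scheme can resolve an object's GroupVersionKind:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/runtime"
    v1 "kubevirt.io/api/core/v1"
)

func main() {
    scheme := runtime.NewScheme()
    if err := v1.AddToScheme(scheme); err != nil {
        panic(err)
    }
    gvks, _, err := scheme.ObjectKinds(&v1.VirtualMachine{})
    if err != nil {
        panic(err)
    }
    fmt.Println(gvks[0]) // kubevirt.io/v1, Kind=VirtualMachine
}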
+ return func(scheme *runtime.Scheme) error { + + for _, groupVersion := range groupVersions { + scheme.AddKnownTypes(groupVersion, + &VirtualMachineInstance{}, + &VirtualMachineInstanceList{}, + &VirtualMachineInstanceReplicaSet{}, + &VirtualMachineInstanceReplicaSetList{}, + &VirtualMachineInstancePreset{}, + &VirtualMachineInstancePresetList{}, + &VirtualMachineInstanceMigration{}, + &VirtualMachineInstanceMigrationList{}, + &VirtualMachine{}, + &VirtualMachineList{}, + &KubeVirt{}, + &KubeVirtList{}, + ) + metav1.AddToGroupVersion(scheme, groupVersion) + } + + return nil + } +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return GroupVersion.WithResource(resource).GroupResource() +} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} diff --git a/vendor/kubevirt.io/api/core/v1/sanitizers.go b/vendor/kubevirt.io/api/core/v1/sanitizers.go new file mode 100644 index 000000000..5df10477e --- /dev/null +++ b/vendor/kubevirt.io/api/core/v1/sanitizers.go @@ -0,0 +1,45 @@ +/* + * This file is part of the KubeVirt project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright 2022 Red Hat, Inc. + * + */ + +package v1 + +import ( + "fmt" + + netutils "k8s.io/utils/net" +) + +func sanitizeIP(address string) (string, error) { + sanitizedAddress := netutils.ParseIPSloppy(address) + if sanitizedAddress == nil { + return "", fmt.Errorf("not a valid IP address") + } + + return sanitizedAddress.String(), nil +} + +func sanitizeCIDR(cidr string) (string, error) { + ip, net, err := netutils.ParseCIDRSloppy(cidr) + if err != nil { + return "", err + } + + netMaskSize, _ := net.Mask.Size() + return fmt.Sprintf("%s/%d", ip.String(), netMaskSize), nil +} diff --git a/vendor/kubevirt.io/api/core/v1/schema.go b/vendor/kubevirt.io/api/core/v1/schema.go new file mode 100644 index 000000000..73e63a29c --- /dev/null +++ b/vendor/kubevirt.io/api/core/v1/schema.go @@ -0,0 +1,1441 @@ +/* + * This file is part of the KubeVirt project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright 2017, 2018 Red Hat, Inc. 
+ *
+ */
+
+package v1
+
+import (
+	"encoding/json"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+type IOThreadsPolicy string
+
+const (
+	IOThreadsPolicyShared  IOThreadsPolicy = "shared"
+	IOThreadsPolicyAuto    IOThreadsPolicy = "auto"
+	CPUModeHostPassthrough                 = "host-passthrough"
+	CPUModeHostModel                       = "host-model"
+	DefaultCPUModel                        = CPUModeHostModel
+)
+
+const HotplugDiskDir = "/var/run/kubevirt/hotplug-disks/"
+
+/*
+ ATTENTION: Rerun code generators when comments on structs or fields are modified.
+*/
+
+// Represents a disk created on the cluster level
+type HostDisk struct {
+	// The path to HostDisk image located on the cluster
+	Path string `json:"path"`
+	// Contains information if disk.img exists or should be created
+	// allowed options are 'Disk' and 'DiskOrCreate'
+	Type HostDiskType `json:"type"`
+	// Capacity of the sparse disk
+	// +optional
+	Capacity resource.Quantity `json:"capacity,omitempty"`
+	// Shared indicates whether the path is shared between nodes
+	Shared *bool `json:"shared,omitempty"`
+}
+
+// ConfigMapVolumeSource adapts a ConfigMap into a volume.
+// More info: https://kubernetes.io/docs/concepts/storage/volumes/#configmap
+type ConfigMapVolumeSource struct {
+	v1.LocalObjectReference `json:",inline"`
+	// Specify whether the ConfigMap or its keys must be defined
+	// +optional
+	Optional *bool `json:"optional,omitempty"`
+	// The volume label of the resulting disk inside the VMI.
+	// Different bootstrapping mechanisms require different values.
+	// Typical values are "cidata" (cloud-init), "config-2" (cloud-init) or "OEMDRV" (kickstart).
+	// +optional
+	VolumeLabel string `json:"volumeLabel,omitempty"`
+}
+
+// SecretVolumeSource adapts a Secret into a volume.
+type SecretVolumeSource struct {
+	// Name of the secret in the pod's namespace to use.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+	// +optional
+	SecretName string `json:"secretName,omitempty"`
+	// Specify whether the Secret or its keys must be defined
+	// +optional
+	Optional *bool `json:"optional,omitempty"`
+	// The volume label of the resulting disk inside the VMI.
+	// Different bootstrapping mechanisms require different values.
+	// Typical values are "cidata" (cloud-init), "config-2" (cloud-init) or "OEMDRV" (kickstart).
+	// +optional
+	VolumeLabel string `json:"volumeLabel,omitempty"`
+}
+
+// DownwardAPIVolumeSource represents a volume containing downward API info.
+type DownwardAPIVolumeSource struct {
+	// Fields is a list of downward API volume files
+	// +optional
+	Fields []v1.DownwardAPIVolumeFile `json:"fields,omitempty"`
+	// The volume label of the resulting disk inside the VMI.
+	// Different bootstrapping mechanisms require different values.
+	// Typical values are "cidata" (cloud-init), "config-2" (cloud-init) or "OEMDRV" (kickstart).
+	// +optional
+	VolumeLabel string `json:"volumeLabel,omitempty"`
+}
+
+// ServiceAccountVolumeSource adapts a ServiceAccount into a volume.
+type ServiceAccountVolumeSource struct {
+	// Name of the service account in the pod's namespace to use.
+	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+	ServiceAccountName string `json:"serviceAccountName,omitempty"`
+}
+
+// DownwardMetricsVolumeSource adds a very small disk to VMIs which contains a limited view of host and guest
+// metrics. The disk content is compatible with vhostmd (https://github.com/vhostmd/vhostmd) and vm-dump-metrics.
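+//
+// For illustration, one of the sources above is typically wired into a VMI
+// volume like this (Volume and VolumeSource are defined later in this file;
+// all values are placeholders):
+//
+//	vol := Volume{
+//		Name: "config-disk",
+//		VolumeSource: VolumeSource{
+//			ConfigMap: &ConfigMapVolumeSource{
+//				LocalObjectReference: v1.LocalObjectReference{Name: "my-config"},
+//				VolumeLabel:          "cidata",
+//			},
+//		},
+//	}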
+type DownwardMetricsVolumeSource struct {
+}
+
+// Represents a Sysprep volume source.
+type SysprepSource struct {
+	// Secret references a k8s Secret that contains Sysprep answer file named autounattend.xml that should be attached as disk of CDROM type.
+	// + optional
+	Secret *v1.LocalObjectReference `json:"secret,omitempty"`
+	// ConfigMap references a ConfigMap that contains Sysprep answer file named autounattend.xml that should be attached as disk of CDROM type.
+	// + optional
+	ConfigMap *v1.LocalObjectReference `json:"configMap,omitempty"`
+}
+
+// Represents a cloud-init nocloud user data source.
+// More info: http://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html
+type CloudInitNoCloudSource struct {
+	// UserDataSecretRef references a k8s secret that contains NoCloud userdata.
+	// + optional
+	UserDataSecretRef *v1.LocalObjectReference `json:"secretRef,omitempty"`
+	// UserDataBase64 contains NoCloud cloud-init userdata as a base64 encoded string.
+	// + optional
+	UserDataBase64 string `json:"userDataBase64,omitempty"`
+	// UserData contains NoCloud inline cloud-init userdata.
+	// + optional
+	UserData string `json:"userData,omitempty"`
+	// NetworkDataSecretRef references a k8s secret that contains NoCloud networkdata.
+	// + optional
+	NetworkDataSecretRef *v1.LocalObjectReference `json:"networkDataSecretRef,omitempty"`
+	// NetworkDataBase64 contains NoCloud cloud-init networkdata as a base64 encoded string.
+	// + optional
+	NetworkDataBase64 string `json:"networkDataBase64,omitempty"`
+	// NetworkData contains NoCloud inline cloud-init networkdata.
+	// + optional
+	NetworkData string `json:"networkData,omitempty"`
+}
+
+// Represents a cloud-init config drive user data source.
+// More info: https://cloudinit.readthedocs.io/en/latest/topics/datasources/configdrive.html
+type CloudInitConfigDriveSource struct {
+	// UserDataSecretRef references a k8s secret that contains config drive userdata.
+	// + optional
+	UserDataSecretRef *v1.LocalObjectReference `json:"secretRef,omitempty"`
+	// UserDataBase64 contains config drive cloud-init userdata as a base64 encoded string.
+	// + optional
+	UserDataBase64 string `json:"userDataBase64,omitempty"`
+	// UserData contains config drive inline cloud-init userdata.
+	// + optional
+	UserData string `json:"userData,omitempty"`
+	// NetworkDataSecretRef references a k8s secret that contains config drive networkdata.
+	// + optional
+	NetworkDataSecretRef *v1.LocalObjectReference `json:"networkDataSecretRef,omitempty"`
+	// NetworkDataBase64 contains config drive cloud-init networkdata as a base64 encoded string.
+	// + optional
+	NetworkDataBase64 string `json:"networkDataBase64,omitempty"`
+	// NetworkData contains config drive inline cloud-init networkdata.
+	// + optional
+	NetworkData string `json:"networkData,omitempty"`
+}
+
+type DomainSpec struct {
+	// Resources describes the Compute Resources required by this vmi.
+	Resources ResourceRequirements `json:"resources,omitempty"`
+	// CPU allows specifying the detailed CPU topology inside the vmi.
+	// +optional
+	CPU *CPU `json:"cpu,omitempty"`
+	// Memory allows specifying the VMI memory features.
+	// +optional
+	Memory *Memory `json:"memory,omitempty"`
+	// Machine type.
+	// +optional
+	Machine *Machine `json:"machine,omitempty"`
+	// Firmware.
+	// +optional
+	Firmware *Firmware `json:"firmware,omitempty"`
+	// Clock sets the clock and timers of the vmi.
+	// +optional
+	Clock *Clock `json:"clock,omitempty"`
+	// Features like acpi, apic, hyperv, smm.
+	// +optional
+	Features *Features `json:"features,omitempty"`
+	// Devices allows adding disks, network interfaces, and others
+	Devices Devices `json:"devices"`
+	// Controls whether or not disks will share IOThreads.
+	// Omitting IOThreadsPolicy disables use of IOThreads.
+	// One of: shared, auto
+	// +optional
+	IOThreadsPolicy *IOThreadsPolicy `json:"ioThreadsPolicy,omitempty"`
+	// Chassis specifies the chassis info passed to the domain.
+	// +optional
+	Chassis *Chassis `json:"chassis,omitempty"`
+	// Launch Security setting of the vmi.
+	// +optional
+	LaunchSecurity *LaunchSecurity `json:"launchSecurity,omitempty"`
+}
+
+// Chassis specifies the chassis info passed to the domain.
+type Chassis struct {
+	Manufacturer string `json:"manufacturer,omitempty"`
+	Version      string `json:"version,omitempty"`
+	Serial       string `json:"serial,omitempty"`
+	Asset        string `json:"asset,omitempty"`
+	Sku          string `json:"sku,omitempty"`
+}
+
+// Represents the firmware blob used to assist in the domain creation process.
+// Used for setting the QEMU BIOS file path for the libvirt domain.
+type Bootloader struct {
+	// If set (default), BIOS will be used.
+	// +optional
+	BIOS *BIOS `json:"bios,omitempty"`
+	// If set, EFI will be used instead of BIOS.
+	// +optional
+	EFI *EFI `json:"efi,omitempty"`
+}
+
+// If set (default), BIOS will be used.
+type BIOS struct {
+	// If set, the BIOS output will be transmitted over serial
+	// +optional
+	UseSerial *bool `json:"useSerial,omitempty"`
+}
+
+// If set, EFI will be used instead of BIOS.
+type EFI struct {
+	// If set, SecureBoot will be enabled and the OVMF roms will be swapped for
+	// SecureBoot-enabled ones.
+	// Requires SMM to be enabled.
+	// Defaults to true
+	// +optional
+	SecureBoot *bool `json:"secureBoot,omitempty"`
+}
+
+// If set, the VM will be booted from the defined kernel / initrd.
+type KernelBootContainer struct {
+	// Image that contains initrd / kernel files.
+	Image string `json:"image"`
+	// ImagePullSecret is the name of the Docker registry secret required to pull the image. The secret must already exist.
+	//+optional
+	ImagePullSecret string `json:"imagePullSecret,omitempty"`
+	// Image pull policy.
+	// One of Always, Never, IfNotPresent.
+	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+	// Cannot be updated.
+	// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+	// +optional
+	ImagePullPolicy v1.PullPolicy `json:"imagePullPolicy,omitempty"`
+	// The fully-qualified path to the kernel image in the host OS
+	//+optional
+	KernelPath string `json:"kernelPath,omitempty"`
+	// The fully-qualified path to the ramdisk image in the host OS
+	//+optional
+	InitrdPath string `json:"initrdPath,omitempty"`
+}
+
+// Represents the firmware blob used to assist in the kernel boot process.
+// Used for setting the kernel, initrd and command line arguments
+type KernelBoot struct {
+	// Arguments to be passed to the kernel at boot time
+	KernelArgs string `json:"kernelArgs,omitempty"`
+	// Container defines the container that contains kernel artifacts
+	Container *KernelBootContainer `json:"container,omitempty"`
+}
+
+type ResourceRequirements struct {
+	// Requests is a description of the initial vmi resources.
+	// Valid resource keys are "memory" and "cpu".
+	// +optional
+	Requests v1.ResourceList `json:"requests,omitempty"`
+	// Limits describes the maximum amount of compute resources allowed.
+	// Valid resource keys are "memory" and "cpu".
+	// +optional
+	Limits v1.ResourceList `json:"limits,omitempty"`
+	// Don't ask the scheduler to take the guest-management overhead into account. Instead
+	// put the overhead only into the container's memory limit. This can lead to crashes if
+	// all memory is in use on a node. Defaults to false.
+	OvercommitGuestOverhead bool `json:"overcommitGuestOverhead,omitempty"`
+}
+
+// CPU allows specifying the CPU topology.
+type CPU struct {
+	// Cores specifies the number of cores inside the vmi.
+	// Must be a value greater than or equal to 1.
+	Cores uint32 `json:"cores,omitempty"`
+	// Sockets specifies the number of sockets inside the vmi.
+	// Must be a value greater than or equal to 1.
+	Sockets uint32 `json:"sockets,omitempty"`
+	// Threads specifies the number of threads inside the vmi.
+	// Must be a value greater than or equal to 1.
+	Threads uint32 `json:"threads,omitempty"`
+	// Model specifies the CPU model inside the VMI.
+	// List of available models: https://github.com/libvirt/libvirt/tree/master/src/cpu_map.
+	// It is possible to specify special cases like "host-passthrough" to get the same CPU as the node
+	// and "host-model" to get CPU closest to the node one.
+	// Defaults to host-model.
+	// +optional
+	Model string `json:"model,omitempty"`
+	// Features specifies the CPU features list inside the VMI.
+	// +optional
+	Features []CPUFeature `json:"features,omitempty"`
+	// DedicatedCPUPlacement requests the scheduler to place the VirtualMachineInstance on a node
+	// with enough dedicated pCPUs and pin the vCPUs to it.
+	// +optional
+	DedicatedCPUPlacement bool `json:"dedicatedCpuPlacement,omitempty"`
+
+	// NUMA allows specifying settings for the guest NUMA topology
+	// +optional
+	NUMA *NUMA `json:"numa,omitempty"`
+
+	// IsolateEmulatorThread requests one more dedicated pCPU to be allocated for the VMI to place
+	// the emulator thread on it.
+	// +optional
+	IsolateEmulatorThread bool `json:"isolateEmulatorThread,omitempty"`
+	// Realtime instructs the virt-launcher to tune the VMI for lower latency; optional for real-time workloads
+	// +optional
+	Realtime *Realtime `json:"realtime,omitempty"`
+}
+
+// Realtime holds the tuning knobs specific for realtime workloads.
+type Realtime struct {
+	// Mask defines the vcpu mask expression that defines which vcpus are used for realtime. Format matches libvirt's expressions.
+	// Example: "0-3,^1","0,2,3","2-3"
+	// +optional
+	Mask string `json:"mask,omitempty"`
+}
+
+// NUMAGuestMappingPassthrough instructs kubevirt to model numa topology which is compatible with the CPU pinning on the guest.
+// This will result in a subset of the node numa topology being passed through, ensuring that virtual numa nodes and their memory
+// never cross boundaries coming from the node numa mapping.
+type NUMAGuestMappingPassthrough struct {
+}
+
+type NUMA struct {
+	// GuestMappingPassthrough will create an efficient guest topology based on host CPUs exclusively assigned to a pod.
+	// The created topology ensures that memory and CPUs on the virtual numa nodes never cross boundaries of host numa nodes.
+	// +optional
+	GuestMappingPassthrough *NUMAGuestMappingPassthrough `json:"guestMappingPassthrough,omitempty"`
+}
+
+// CPUFeature allows specifying a CPU feature.
+type CPUFeature struct {
+	// Name of the CPU feature
+	Name string `json:"name"`
+	// Policy is the CPU feature attribute which can have the following attributes:
+	// force - The virtual CPU will claim the feature is supported regardless of it being supported by host CPU.
+	// require - Guest creation will fail unless the feature is supported by the host CPU or the hypervisor is able to emulate it.
+	// optional - The feature will be supported by virtual CPU if and only if it is supported by host CPU.
+	// disable - The feature will not be supported by virtual CPU.
+	// forbid - Guest creation will fail if the feature is supported by host CPU.
+	// Defaults to require
+	// +optional
+	Policy string `json:"policy,omitempty"`
+}
+
+// Memory allows specifying the VirtualMachineInstance memory features.
+type Memory struct {
+	// Hugepages allows using hugepages for the VirtualMachineInstance instead of regular memory.
+	// +optional
+	Hugepages *Hugepages `json:"hugepages,omitempty"`
+	// Guest allows specifying the amount of memory which is visible inside the Guest OS.
+	// The Guest must lie between Requests and Limits from the resources section.
+	// Defaults to the requested memory in the resources section if not specified.
+	// + optional
+	Guest *resource.Quantity `json:"guest,omitempty"`
+}
+
+// Hugepages allows using hugepages for the VirtualMachineInstance instead of regular memory.
+type Hugepages struct {
+	// PageSize specifies the hugepage size, for x86_64 architecture valid values are 1Gi and 2Mi.
+	PageSize string `json:"pageSize,omitempty"`
+}
+
+type Machine struct {
+	// QEMU machine type is the actual chipset of the VirtualMachineInstance.
+	// +optional
+	Type string `json:"type"`
+}
+
+type Firmware struct {
+	// UUID reported by the vmi bios.
+	// Defaults to a randomly generated uid.
+	UUID types.UID `json:"uuid,omitempty"`
+	// Settings to control the bootloader that is used.
+	// +optional
+	Bootloader *Bootloader `json:"bootloader,omitempty"`
+	// The system-serial-number in SMBIOS
+	Serial string `json:"serial,omitempty"`
+	// Settings to set the kernel for booting.
+	// +optional
+	KernelBoot *KernelBoot `json:"kernelBoot,omitempty"`
+}
+
+type Devices struct {
+	// Fall back to legacy virtio 0.9 support if virtio bus is selected on devices.
+	// This is helpful for old machines like CentOS6 or RHEL6 which
+	// do not understand virtio_non_transitional (virtio 1.0).
+	UseVirtioTransitional *bool `json:"useVirtioTransitional,omitempty"`
+	// DisableHotplug disables the ability to hotplug disks.
+	DisableHotplug bool `json:"disableHotplug,omitempty"`
+	// Disks describes disks, cdroms and luns which are connected to the vmi.
+	Disks []Disk `json:"disks,omitempty"`
+	// Watchdog describes a watchdog device which can be added to the vmi.
+	Watchdog *Watchdog `json:"watchdog,omitempty"`
+	// Interfaces describe network interfaces which are added to the vmi.
+	Interfaces []Interface `json:"interfaces,omitempty"`
+	// Inputs describe input devices
+	Inputs []Input `json:"inputs,omitempty"`
+	// Whether to attach a pod network interface. Defaults to true.
+	AutoattachPodInterface *bool `json:"autoattachPodInterface,omitempty"`
+	// Whether to attach the default graphics device or not.
+	// VNC will not be available if set to false. Defaults to true.
+	AutoattachGraphicsDevice *bool `json:"autoattachGraphicsDevice,omitempty"`
+	// Whether to attach the default serial console or not.
+	// Serial console access will not be available if set to false. Defaults to true.
+	AutoattachSerialConsole *bool `json:"autoattachSerialConsole,omitempty"`
+	// Whether to attach the Memory balloon device with default period.
+	// Period can be adjusted in virt-config.
+	// Defaults to true.
+	// +optional
+	AutoattachMemBalloon *bool `json:"autoattachMemBalloon,omitempty"`
+	// Whether to attach an Input Device.
+	// Defaults to false.
+	// +optional
+	AutoattachInputDevice *bool `json:"autoattachInputDevice,omitempty"`
+	// Whether to attach the VSOCK CID to the VM or not.
+	// VSOCK access will be available if set to true. Defaults to false.
+	AutoattachVSOCK *bool `json:"autoattachVSOCK,omitempty"`
+	// Whether to have random number generator from host
+	// +optional
+	Rng *Rng `json:"rng,omitempty"`
+	// Whether or not to enable virtio multi-queue for block devices.
+	// Defaults to false.
+	// +optional
+	BlockMultiQueue *bool `json:"blockMultiQueue,omitempty"`
+	// If specified, virtual network interfaces configured with a virtio bus will also enable the vhost multiqueue feature for network devices. The number of queues created depends on additional factors of the VirtualMachineInstance, like the number of guest CPUs.
+	// +optional
+	NetworkInterfaceMultiQueue *bool `json:"networkInterfaceMultiqueue,omitempty"`
+	// Whether to attach a GPU device to the vmi.
+	// +optional
+	// +listType=atomic
+	GPUs []GPU `json:"gpus,omitempty"`
+	// Filesystems describes filesystem which is connected to the vmi.
+	// +optional
+	// +listType=atomic
+	Filesystems []Filesystem `json:"filesystems,omitempty"`
+	// Whether to attach a host device to the vmi.
+	// +optional
+	// +listType=atomic
+	HostDevices []HostDevice `json:"hostDevices,omitempty"`
+	// To configure and access client devices such as redirecting USB
+	// +optional
+	ClientPassthrough *ClientPassthroughDevices `json:"clientPassthrough,omitempty"`
+	// Whether to emulate a sound device.
+	// +optional
+	Sound *SoundDevice `json:"sound,omitempty"`
+	// Whether to emulate a TPM device.
+	// +optional
+	TPM *TPMDevice `json:"tpm,omitempty"`
+}
+
+// Represents a subset of client devices that can be accessed by the VMI. At the
+// moment, only USB devices using Usbredir's library and tooling are supported.
+// Another fit would be a smartcard with libcacard.
+//
+// The struct is currently empty as there is no immediate request for
+// user-facing APIs. This structure simply turns on USB redirection of
+// UsbClientPassthroughMaxNumberOf devices.
+type ClientPassthroughDevices struct {
+}
+
+// Represents the upper limit allowed by QEMU + KubeVirt.
+const (
+	UsbClientPassthroughMaxNumberOf = 4
+)
+
+// Represents the user's configuration to emulate sound cards in the VMI.
+type SoundDevice struct {
+	// User's defined name for this sound device
+	Name string `json:"name"`
+	// We only support ich9 or ac97.
+	// If SoundDevice is not set: No sound card is emulated.
+	// If SoundDevice is set but Model is not: ich9
+	// +optional
+	Model string `json:"model,omitempty"`
+}
+
+type TPMDevice struct{}
+
+type InputBus string
+
+const (
+	InputBusUSB    InputBus = "usb"
+	InputBusVirtio InputBus = "virtio"
+)
+
+type InputType string
+
+const (
+	InputTypeTablet   InputType = "tablet"
+	InputTypeKeyboard InputType = "keyboard"
+)
+
+type Input struct {
+	// Bus indicates the bus of input device to emulate.
+	// Supported values: virtio, usb.
+	Bus InputBus `json:"bus,omitempty"`
+	// Type indicates the type of input device.
+	// Supported values: tablet.
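+	// For example (illustrative values only):
+	//
+	//	tablet := Input{Name: "tablet0", Type: InputTypeTablet, Bus: InputBusVirtio}
+	//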
+	Type InputType `json:"type"`
+	// Name is the device name
+	Name string `json:"name"`
+}
+
+type Filesystem struct {
+	// Name is the device name
+	Name string `json:"name"`
+	// Virtiofs is supported
+	Virtiofs *FilesystemVirtiofs `json:"virtiofs"`
+}
+
+type FilesystemVirtiofs struct{}
+
+type GPU struct {
+	// Name of the GPU device as exposed by a device plugin
+	Name              string       `json:"name"`
+	DeviceName        string       `json:"deviceName"`
+	VirtualGPUOptions *VGPUOptions `json:"virtualGPUOptions,omitempty"`
+	// If specified, the virtual network interface address and its tag will be provided to the guest via config drive
+	// +optional
+	Tag string `json:"tag,omitempty"`
+}
+
+type VGPUOptions struct {
+	Display *VGPUDisplayOptions `json:"display,omitempty"`
+}
+
+type VGPUDisplayOptions struct {
+	// Enabled determines if a display adapter backed by a vGPU should be enabled or disabled on the guest.
+	// Defaults to true.
+	// +optional
+	Enabled *bool `json:"enabled,omitempty"`
+	// Enables a boot framebuffer, until the guest OS loads a real GPU driver
+	// Defaults to true.
+	// +optional
+	RamFB *FeatureState `json:"ramFB,omitempty"`
+}
+
+type HostDevice struct {
+	Name string `json:"name"`
+	// DeviceName is the resource name of the host device exposed by a device plugin
+	DeviceName string `json:"deviceName"`
+	// If specified, the virtual network interface address and its tag will be provided to the guest via config drive
+	// +optional
+	Tag string `json:"tag,omitempty"`
+}
+
+type Disk struct {
+	// Name is the device name
+	Name string `json:"name"`
+	// DiskDevice specifies as which device the disk should be added to the guest.
+	// Defaults to Disk.
+	DiskDevice `json:",inline"`
+	// BootOrder is an integer value > 0, used to determine ordering of boot devices.
+	// Lower values take precedence.
+	// Each disk or interface that has a boot order must have a unique value.
+	// Disks without a boot order are not tried if a disk with a boot order exists.
+	// +optional
+	BootOrder *uint `json:"bootOrder,omitempty"`
+	// Serial provides the ability to specify a serial number for the disk device.
+	// +optional
+	Serial string `json:"serial,omitempty"`
+	// dedicatedIOThread indicates this disk should have an exclusive IO Thread.
+	// Enabling this implies useIOThreads = true.
+	// Defaults to false.
+	// +optional
+	DedicatedIOThread *bool `json:"dedicatedIOThread,omitempty"`
+	// Cache specifies which kvm disk cache mode should be used.
+	// Supported values are: CacheNone, CacheWriteThrough.
+	// +optional
+	Cache DriverCache `json:"cache,omitempty"`
+	// IO specifies which QEMU disk IO mode should be used.
+	// Supported values are: native, default, threads.
+	// +optional
+	IO DriverIO `json:"io,omitempty"`
+	// If specified, disk address and its tag will be provided to the guest via config drive metadata
+	// +optional
+	Tag string `json:"tag,omitempty"`
+	// If specified, the virtual disk will be presented with the given block sizes.
+	// +optional
+	BlockSize *BlockSize `json:"blockSize,omitempty"`
+	// If specified, the disk is made shareable and multiple writes from different VMs are permitted
+	// +optional
+	Shareable *bool `json:"shareable,omitempty"`
+}
+
+// CustomBlockSize represents the desired logical and physical block size for a VM disk.
+type CustomBlockSize struct {
+	Logical  uint `json:"logical"`
+	Physical uint `json:"physical"`
+}
+
+// BlockSize provides the option to change the block size presented to the VM for a disk.
+// Only one of its members may be specified. +type BlockSize struct { + Custom *CustomBlockSize `json:"custom,omitempty"` + MatchVolume *FeatureState `json:"matchVolume,omitempty"` +} + +// Represents the target of a volume to mount. +// Only one of its members may be specified. +type DiskDevice struct { + // Attach a volume as a disk to the vmi. + Disk *DiskTarget `json:"disk,omitempty"` + // Attach a volume as a LUN to the vmi. + LUN *LunTarget `json:"lun,omitempty"` + // Attach a volume as a cdrom to the vmi. + CDRom *CDRomTarget `json:"cdrom,omitempty"` +} + +type DiskBus string + +const ( + DiskBusSCSI DiskBus = "scsi" + DiskBusSATA DiskBus = "sata" + DiskBusVirtio DiskBus = VirtIO + DiskBusUSB DiskBus = "usb" +) + +type DiskTarget struct { + // Bus indicates the type of disk device to emulate. + // supported values: virtio, sata, scsi, usb. + Bus DiskBus `json:"bus,omitempty"` + // ReadOnly. + // Defaults to false. + ReadOnly bool `json:"readonly,omitempty"` + // If specified, the virtual disk will be placed on the guests pci address with the specified PCI address. For example: 0000:81:01.10 + // +optional + PciAddress string `json:"pciAddress,omitempty"` +} + +type LaunchSecurity struct { + // AMD Secure Encrypted Virtualization (SEV). + SEV *SEV `json:"sev,omitempty"` +} + +type SEV struct { +} + +type LunTarget struct { + // Bus indicates the type of disk device to emulate. + // supported values: virtio, sata, scsi. + Bus DiskBus `json:"bus,omitempty"` + // ReadOnly. + // Defaults to false. + ReadOnly bool `json:"readonly,omitempty"` +} + +// TrayState indicates if a tray of a cdrom is open or closed. +type TrayState string + +const ( + // TrayStateOpen indicates that the tray of a cdrom is open. + TrayStateOpen TrayState = "open" + // TrayStateClosed indicates that the tray of a cdrom is closed. + TrayStateClosed TrayState = "closed" +) + +type CDRomTarget struct { + // Bus indicates the type of disk device to emulate. + // supported values: virtio, sata, scsi. + Bus DiskBus `json:"bus,omitempty"` + // ReadOnly. + // Defaults to true. + ReadOnly *bool `json:"readonly,omitempty"` + // Tray indicates if the tray of the device is open or closed. + // Allowed values are "open" and "closed". + // Defaults to closed. + // +optional + Tray TrayState `json:"tray,omitempty"` +} + +// Volume represents a named volume in a vmi. +type Volume struct { + // Volume's name. + // Must be a DNS_LABEL and unique within the vmi. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + Name string `json:"name"` + // VolumeSource represents the location and type of the mounted volume. + // Defaults to Disk, if no type is specified. + VolumeSource `json:",inline"` +} + +// Represents the source of a volume to mount. +// Only one of its members may be specified. +type VolumeSource struct { + // HostDisk represents a disk created on the cluster level + // +optional + HostDisk *HostDisk `json:"hostDisk,omitempty"` + // PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. + // Directly attached to the vmi via qemu. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty"` + // CloudInitNoCloud represents a cloud-init NoCloud user-data source. + // The NoCloud data will be added as a disk to the vmi. 
A proper cloud-init installation is required inside the guest. + // More info: http://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html + // +optional + CloudInitNoCloud *CloudInitNoCloudSource `json:"cloudInitNoCloud,omitempty"` + // CloudInitConfigDrive represents a cloud-init Config Drive user-data source. + // The Config Drive data will be added as a disk to the vmi. A proper cloud-init installation is required inside the guest. + // More info: https://cloudinit.readthedocs.io/en/latest/topics/datasources/configdrive.html + // +optional + CloudInitConfigDrive *CloudInitConfigDriveSource `json:"cloudInitConfigDrive,omitempty"` + // Represents a Sysprep volume source. + // +optional + Sysprep *SysprepSource `json:"sysprep,omitempty"` + // ContainerDisk references a docker image, embedding a qcow or raw disk. + // More info: https://kubevirt.gitbooks.io/user-guide/registry-disk.html + // +optional + ContainerDisk *ContainerDiskSource `json:"containerDisk,omitempty"` + // Ephemeral is a special volume source that "wraps" specified source and provides copy-on-write image on top of it. + // +optional + Ephemeral *EphemeralVolumeSource `json:"ephemeral,omitempty"` + // EmptyDisk represents a temporary disk which shares the vmis lifecycle. + // More info: https://kubevirt.gitbooks.io/user-guide/disks-and-volumes.html + // +optional + EmptyDisk *EmptyDiskSource `json:"emptyDisk,omitempty"` + // DataVolume represents the dynamic creation a PVC for this volume as well as + // the process of populating that PVC with a disk image. + // +optional + DataVolume *DataVolumeSource `json:"dataVolume,omitempty"` + // ConfigMapSource represents a reference to a ConfigMap in the same namespace. + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/ + // +optional + ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty"` + // SecretVolumeSource represents a reference to a secret data in the same namespace. + // More info: https://kubernetes.io/docs/concepts/configuration/secret/ + // +optional + Secret *SecretVolumeSource `json:"secret,omitempty"` + // DownwardAPI represents downward API about the pod that should populate this volume + // +optional + DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty"` + // ServiceAccountVolumeSource represents a reference to a service account. + // There can only be one volume of this type! + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + // +optional + ServiceAccount *ServiceAccountVolumeSource `json:"serviceAccount,omitempty"` + // DownwardMetrics adds a very small disk to VMIs which contains a limited view of host and guest + // metrics. The disk content is compatible with vhostmd (https://github.com/vhostmd/vhostmd) and vm-dump-metrics. + DownwardMetrics *DownwardMetricsVolumeSource `json:"downwardMetrics,omitempty"` + // MemoryDump is attached to the virt launcher and is populated with a memory dump of the vmi + MemoryDump *MemoryDumpVolumeSource `json:"memoryDump,omitempty"` +} + +// HotplugVolumeSource Represents the source of a volume to mount which are capable +// of being hotplugged on a live running VMI. +// Only one of its members may be specified. +type HotplugVolumeSource struct { + // PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. + // Directly attached to the vmi via qemu. 
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty"` + // DataVolume represents the dynamic creation a PVC for this volume as well as + // the process of populating that PVC with a disk image. + // +optional + DataVolume *DataVolumeSource `json:"dataVolume,omitempty"` +} + +type DataVolumeSource struct { + // Name of both the DataVolume and the PVC in the same namespace. + // After PVC population the DataVolume is garbage collected by default. + Name string `json:"name"` + // Hotpluggable indicates whether the volume can be hotplugged and hotunplugged. + // +optional + Hotpluggable bool `json:"hotpluggable,omitempty"` +} + +// PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. +// Directly attached to the vmi via qemu. +// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims +type PersistentVolumeClaimVolumeSource struct { + v1.PersistentVolumeClaimVolumeSource `json:",inline"` + // Hotpluggable indicates whether the volume can be hotplugged and hotunplugged. + // +optional + Hotpluggable bool `json:"hotpluggable,omitempty"` +} + +type MemoryDumpVolumeSource struct { + // PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. + // Directly attached to the virt launcher + // +optional + PersistentVolumeClaimVolumeSource `json:",inline"` +} + +type EphemeralVolumeSource struct { + // PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. + // Directly attached to the vmi via qemu. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + PersistentVolumeClaim *v1.PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty"` +} + +// EmptyDisk represents a temporary disk which shares the vmis lifecycle. +type EmptyDiskSource struct { + // Capacity of the sparse disk. + Capacity resource.Quantity `json:"capacity"` +} + +// Represents a docker image with an embedded disk. +type ContainerDiskSource struct { + // Image is the name of the image with the embedded disk. + Image string `json:"image"` + // ImagePullSecret is the name of the Docker registry secret required to pull the image. The secret must already exist. + ImagePullSecret string `json:"imagePullSecret,omitempty"` + // Path defines the path to disk file in the container + Path string `json:"path,omitempty"` + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + ImagePullPolicy v1.PullPolicy `json:"imagePullPolicy,omitempty"` +} + +// Exactly one of its members must be set. +type ClockOffset struct { + // UTC sets the guest clock to UTC on each boot. If an offset is specified, + // guest changes to the clock will be kept during reboots and are not reset. + UTC *ClockOffsetUTC `json:"utc,omitempty"` + // Timezone sets the guest clock to the specified timezone. + // Zone name follows the TZ environment variable format (e.g. 'America/New_York'). + Timezone *ClockOffsetTimezone `json:"timezone,omitempty"` +} + +// UTC sets the guest clock to UTC on each boot. 
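+//
+// For illustration, a guest clock pinned to UTC with a one-hour offset
+// (Clock and ClockOffset are defined just below; the offset value is a
+// placeholder):
+//
+//	offset := 3600
+//	clock := Clock{ClockOffset: ClockOffset{UTC: &ClockOffsetUTC{OffsetSeconds: &offset}}}
+//	_ = clock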
+type ClockOffsetUTC struct {
+	// OffsetSeconds specifies an offset in seconds, relative to UTC. If set,
+	// guest changes to the clock will be kept during reboots and not reset.
+	OffsetSeconds *int `json:"offsetSeconds,omitempty"`
+}
+
+// ClockOffsetTimezone sets the guest clock to the specified timezone.
+// Zone name follows the TZ environment variable format (e.g. 'America/New_York').
+type ClockOffsetTimezone string
+
+// Represents the clock and timers of a vmi.
+// +kubebuilder:pruning:PreserveUnknownFields
+type Clock struct {
+	// ClockOffset allows specifying the UTC offset or the timezone of the guest clock.
+	ClockOffset `json:",inline"`
+	// Timer specifies which timers are attached to the vmi.
+	// +optional
+	Timer *Timer `json:"timer,omitempty"`
+}
+
+// Represents all available timers in a vmi.
+type Timer struct {
+	// HPET (High Precision Event Timer) - multiple timers with periodic interrupts.
+	HPET *HPETTimer `json:"hpet,omitempty"`
+	// KVM (KVM clock) - lets guests read the host's wall clock time (paravirtualized). For Linux guests.
+	KVM *KVMTimer `json:"kvm,omitempty"`
+	// PIT (Programmable Interval Timer) - a timer with periodic interrupts.
+	PIT *PITTimer `json:"pit,omitempty"`
+	// RTC (Real Time Clock) - a continuously running timer with periodic interrupts.
+	RTC *RTCTimer `json:"rtc,omitempty"`
+	// Hyperv (Hypervclock) - lets guests read the host's wall clock time (paravirtualized). For Windows guests.
+	Hyperv *HypervTimer `json:"hyperv,omitempty"`
+}
+
+// HPETTickPolicy determines what happens when QEMU misses a deadline for injecting a tick to the guest.
+type HPETTickPolicy string
+
+// PITTickPolicy determines what happens when QEMU misses a deadline for injecting a tick to the guest.
+type PITTickPolicy string
+
+// RTCTickPolicy determines what happens when QEMU misses a deadline for injecting a tick to the guest.
+type RTCTickPolicy string
+
+const (
+	// HPETTickPolicyDelay delivers ticks at a constant rate. The guest time will
+	// be delayed due to the late tick
+	HPETTickPolicyDelay HPETTickPolicy = "delay"
+	// HPETTickPolicyCatchup delivers ticks at a higher rate to catch up with the
+	// missed tick. The guest time should not be delayed once catchup is complete
+	HPETTickPolicyCatchup HPETTickPolicy = "catchup"
+	// HPETTickPolicyMerge merges the missed tick(s) into one tick and injects it. The
+	// guest time may be delayed, depending on how the OS reacts to the merging
+	// of ticks.
+	HPETTickPolicyMerge HPETTickPolicy = "merge"
+	// HPETTickPolicyDiscard discards all missed ticks.
+	HPETTickPolicyDiscard HPETTickPolicy = "discard"
+
+	// PITTickPolicyDelay delivers ticks at a constant rate. The guest time will
+	// be delayed due to the late tick.
+	PITTickPolicyDelay PITTickPolicy = "delay"
+	// PITTickPolicyCatchup delivers ticks at a higher rate to catch up with the
+	// missed tick. The guest time should not be delayed once catchup is complete.
+	PITTickPolicyCatchup PITTickPolicy = "catchup"
+	// PITTickPolicyDiscard discards all missed ticks.
+	PITTickPolicyDiscard PITTickPolicy = "discard"
+
+	// RTCTickPolicyDelay delivers ticks at a constant rate. The guest time will
+	// be delayed due to the late tick.
+	RTCTickPolicyDelay RTCTickPolicy = "delay"
+	// RTCTickPolicyCatchup delivers ticks at a higher rate to catch up with the
+	// missed tick. The guest time should not be delayed once catchup is complete.
+	RTCTickPolicyCatchup RTCTickPolicy = "catchup"
+)
+
+// RTCTimerTrack specifies from which source to track the time.
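+//
+// An illustrative sketch of the tick policies above (RTCTimer and Timer are
+// defined nearby in this file; the chosen values are examples, not defaults):
+//
+//	rtc := RTCTimer{TickPolicy: RTCTickPolicyCatchup, Track: TrackGuest}
+//	timers := Timer{RTC: &rtc}
+//	_ = timers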
+type RTCTimerTrack string + +const ( + // TrackGuest tracks the guest time. + TrackGuest RTCTimerTrack = "guest" + // TrackWall tracks the host time. + TrackWall RTCTimerTrack = "wall" +) + +type RTCTimer struct { + // TickPolicy determines what happens when QEMU misses a deadline for injecting a tick to the guest. + // One of "delay", "catchup". + TickPolicy RTCTickPolicy `json:"tickPolicy,omitempty"` + // Enabled set to false makes sure that the machine type or a preset can't add the timer. + // Defaults to true. + // +optional + Enabled *bool `json:"present,omitempty"` + // Track the guest or the wall clock. + Track RTCTimerTrack `json:"track,omitempty"` +} + +type HPETTimer struct { + // TickPolicy determines what happens when QEMU misses a deadline for injecting a tick to the guest. + // One of "delay", "catchup", "merge", "discard". + TickPolicy HPETTickPolicy `json:"tickPolicy,omitempty"` + // Enabled set to false makes sure that the machine type or a preset can't add the timer. + // Defaults to true. + // +optional + Enabled *bool `json:"present,omitempty"` +} + +type PITTimer struct { + // TickPolicy determines what happens when QEMU misses a deadline for injecting a tick to the guest. + // One of "delay", "catchup", "discard". + TickPolicy PITTickPolicy `json:"tickPolicy,omitempty"` + // Enabled set to false makes sure that the machine type or a preset can't add the timer. + // Defaults to true. + // +optional + Enabled *bool `json:"present,omitempty"` +} + +type KVMTimer struct { + // Enabled set to false makes sure that the machine type or a preset can't add the timer. + // Defaults to true. + // +optional + Enabled *bool `json:"present,omitempty"` +} + +type HypervTimer struct { + // Enabled set to false makes sure that the machine type or a preset can't add the timer. + // Defaults to true. + // +optional + Enabled *bool `json:"present,omitempty"` +} + +type Features struct { + // ACPI enables/disables ACPI inside the guest. + // Defaults to enabled. + // +optional + ACPI FeatureState `json:"acpi,omitempty"` + // Defaults to the machine type setting. + // +optional + APIC *FeatureAPIC `json:"apic,omitempty"` + // Defaults to the machine type setting. + // +optional + Hyperv *FeatureHyperv `json:"hyperv,omitempty"` + // SMM enables/disables System Management Mode. + // TSEG not yet implemented. + // +optional + SMM *FeatureState `json:"smm,omitempty"` + // Configure how KVM presence is exposed to the guest. + // +optional + KVM *FeatureKVM `json:"kvm,omitempty"` + // Notify the guest that the host supports paravirtual spinlocks. + // For older kernels this feature should be explicitly disabled. + // +optional + Pvspinlock *FeatureState `json:"pvspinlock,omitempty"` +} + +type SyNICTimer struct { + Enabled *bool `json:"enabled,omitempty"` + Direct *FeatureState `json:"direct,omitempty"` +} + +// Represents if a feature is enabled or disabled. +type FeatureState struct { + // Enabled determines if the feature should be enabled or disabled on the guest. + // Defaults to true. + // +optional + Enabled *bool `json:"enabled,omitempty"` +} + +type FeatureAPIC struct { + // Enabled determines if the feature should be enabled or disabled on the guest. + // Defaults to true. + // +optional + Enabled *bool `json:"enabled,omitempty"` + // EndOfInterrupt enables the end of interrupt notification in the guest. + // Defaults to false. 
+	// +optional
+	EndOfInterrupt bool `json:"endOfInterrupt,omitempty"`
+}
+
+type FeatureSpinlocks struct {
+	// Enabled determines if the feature should be enabled or disabled on the guest.
+	// Defaults to true.
+	// +optional
+	Enabled *bool `json:"enabled,omitempty"`
+	// Retries indicates the number of retries.
+	// Must be a value greater than or equal to 4096.
+	// Defaults to 4096.
+	// +optional
+	Retries *uint32 `json:"spinlocks,omitempty"`
+}
+
+type FeatureVendorID struct {
+	// Enabled determines if the feature should be enabled or disabled on the guest.
+	// Defaults to true.
+	// +optional
+	Enabled *bool `json:"enabled,omitempty"`
+	// VendorID sets the hypervisor vendor id, visible to the vmi.
+	// String up to twelve characters.
+	VendorID string `json:"vendorid,omitempty"`
+}
+
+// Hyperv specific features.
+type FeatureHyperv struct {
+	// Relaxed instructs the guest OS to disable watchdog timeouts.
+	// Defaults to the machine type setting.
+	// +optional
+	Relaxed *FeatureState `json:"relaxed,omitempty"`
+	// VAPIC improves the paravirtualized handling of interrupts.
+	// Defaults to the machine type setting.
+	// +optional
+	VAPIC *FeatureState `json:"vapic,omitempty"`
+	// Spinlocks allows configuring the spinlock retry attempts.
+	// +optional
+	Spinlocks *FeatureSpinlocks `json:"spinlocks,omitempty"`
+	// VPIndex enables the Virtual Processor Index to help Windows identify virtual processors.
+	// Defaults to the machine type setting.
+	// +optional
+	VPIndex *FeatureState `json:"vpindex,omitempty"`
+	// Runtime improves the time accounting to improve scheduling in the guest.
+	// Defaults to the machine type setting.
+	// +optional
+	Runtime *FeatureState `json:"runtime,omitempty"`
+	// SyNIC enables the Synthetic Interrupt Controller.
+	// Defaults to the machine type setting.
+	// +optional
+	SyNIC *FeatureState `json:"synic,omitempty"`
+	// SyNICTimer enables Synthetic Interrupt Controller Timers, reducing CPU load.
+	// Defaults to the machine type setting.
+	// +optional
+	SyNICTimer *SyNICTimer `json:"synictimer,omitempty"`
+	// Reset enables Hyperv reboot/reset for the vmi. Requires synic.
+	// Defaults to the machine type setting.
+	// +optional
+	Reset *FeatureState `json:"reset,omitempty"`
+	// VendorID allows setting the hypervisor vendor id.
+	// Defaults to the machine type setting.
+	// +optional
+	VendorID *FeatureVendorID `json:"vendorid,omitempty"`
+	// Frequencies improves the TSC clock source handling for Hyper-V on KVM.
+	// Defaults to the machine type setting.
+	// +optional
+	Frequencies *FeatureState `json:"frequencies,omitempty"`
+	// Reenlightenment enables the notifications on TSC frequency changes.
+	// Defaults to the machine type setting.
+	// +optional
+	Reenlightenment *FeatureState `json:"reenlightenment,omitempty"`
+	// TLBFlush improves performance in overcommitted environments. Requires vpindex.
+	// Defaults to the machine type setting.
+	// +optional
+	TLBFlush *FeatureState `json:"tlbflush,omitempty"`
+	// IPI improves performance in overcommitted environments. Requires vpindex.
+	// Defaults to the machine type setting.
+	// +optional
+	IPI *FeatureState `json:"ipi,omitempty"`
+	// EVMCS speeds up L2 vmexits, but disables other virtualization features. Requires vapic.
+	// Defaults to the machine type setting.
+	// +optional
+	EVMCS *FeatureState `json:"evmcs,omitempty"`
+}
+
+type FeatureKVM struct {
+	// Hide the KVM hypervisor from standard MSR based discovery.
+ // Defaults to false + Hidden bool `json:"hidden,omitempty"` +} + +// WatchdogAction defines the watchdog action, if a watchdog gets triggered. +type WatchdogAction string + +const ( + // WatchdogActionPoweroff will poweroff the vmi if the watchdog gets triggered. + WatchdogActionPoweroff WatchdogAction = "poweroff" + // WatchdogActionReset will reset the vmi if the watchdog gets triggered. + WatchdogActionReset WatchdogAction = "reset" + // WatchdogActionShutdown will shutdown the vmi if the watchdog gets triggered. + WatchdogActionShutdown WatchdogAction = "shutdown" +) + +// Named watchdog device. +type Watchdog struct { + // Name of the watchdog. + Name string `json:"name"` + // WatchdogDevice contains the watchdog type and actions. + // Defaults to i6300esb. + WatchdogDevice `json:",inline"` +} + +// Hardware watchdog device. +// Exactly one of its members must be set. +type WatchdogDevice struct { + // i6300esb watchdog device. + // +optional + I6300ESB *I6300ESBWatchdog `json:"i6300esb,omitempty"` +} + +// i6300esb watchdog device. +type I6300ESBWatchdog struct { + // The action to take. Valid values are poweroff, reset, shutdown. + // Defaults to reset. + Action WatchdogAction `json:"action,omitempty"` +} + +type Interface struct { + // Logical name of the interface as well as a reference to the associated networks. + // Must match the Name of a Network. + Name string `json:"name"` + // Interface model. + // One of: e1000, e1000e, ne2k_pci, pcnet, rtl8139, virtio. + // Defaults to virtio. + // TODO:(ihar) switch to enums once opengen-api supports them. See: https://github.com/kubernetes/kube-openapi/issues/51 + Model string `json:"model,omitempty"` + // BindingMethod specifies the method which will be used to connect the interface to the guest. + // Defaults to Bridge. + InterfaceBindingMethod `json:",inline"` + // List of ports to be forwarded to the virtual machine. + Ports []Port `json:"ports,omitempty"` + // Interface MAC address. For example: de:ad:00:00:be:af or DE-AD-00-00-BE-AF. + MacAddress string `json:"macAddress,omitempty"` + // BootOrder is an integer value > 0, used to determine ordering of boot devices. + // Lower values take precedence. + // Each interface or disk that has a boot order must have a unique value. + // Interfaces without a boot order are not tried. + // +optional + BootOrder *uint `json:"bootOrder,omitempty"` + // If specified, the virtual network interface will be placed on the guests pci address with the specified PCI address. For example: 0000:81:01.10 + // +optional + PciAddress string `json:"pciAddress,omitempty"` + // If specified the network interface will pass additional DHCP options to the VMI + // +optional + DHCPOptions *DHCPOptions `json:"dhcpOptions,omitempty"` + // If specified, the virtual network interface address and its tag will be provided to the guest via config drive + // +optional + Tag string `json:"tag,omitempty"` + // If specified, the ACPI index is used to provide network interface device naming, that is stable across changes + // in PCI addresses assigned to the device. + // This value is required to be unique across all devices and be between 1 and (16*1024-1). + // +optional + ACPIIndex int `json:"acpiIndex,omitempty"` +} + +// Extra DHCP options to use in the interface. 
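+//
+// For illustration only (field meanings as documented below; the values are
+// placeholders, not defaults):
+//
+//	opts := DHCPOptions{
+//		BootFileName:   "pxelinux.0",
+//		TFTPServerName: "tftp.example.com",
+//		NTPServers:     []string{"192.0.2.10"},
+//		PrivateOptions: []DHCPPrivateOptions{{Option: 240, Value: "example"}},
+//	}
+//	_ = opts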
+type DHCPOptions struct { + // If specified will pass option 67 to interface's DHCP server + // +optional + BootFileName string `json:"bootFileName,omitempty"` + // If specified will pass option 66 to interface's DHCP server + // +optional + TFTPServerName string `json:"tftpServerName,omitempty"` + // If specified will pass the configured NTP server to the VM via DHCP option 042. + // +optional + NTPServers []string `json:"ntpServers,omitempty"` + // If specified will pass extra DHCP options for private use, range: 224-254 + // +optional + PrivateOptions []DHCPPrivateOptions `json:"privateOptions,omitempty"` +} + +func (d *DHCPOptions) UnmarshalJSON(data []byte) error { + type DHCPOptionsAlias DHCPOptions + var dhcpOptionsAlias DHCPOptionsAlias + + if err := json.Unmarshal(data, &dhcpOptionsAlias); err != nil { + return err + } + + for i, ntpServer := range dhcpOptionsAlias.NTPServers { + if sanitizedIP, err := sanitizeIP(ntpServer); err == nil { + dhcpOptionsAlias.NTPServers[i] = sanitizedIP + } + } + + *d = DHCPOptions(dhcpOptionsAlias) + return nil +} + +// DHCPExtraOptions defines Extra DHCP options for a VM. +type DHCPPrivateOptions struct { + // Option is an Integer value from 224-254 + // Required. + Option int `json:"option"` + // Value is a String value for the Option provided + // Required. + Value string `json:"value"` +} + +// Represents the method which will be used to connect the interface to the guest. +// Only one of its members may be specified. +type InterfaceBindingMethod struct { + Bridge *InterfaceBridge `json:"bridge,omitempty"` + Slirp *InterfaceSlirp `json:"slirp,omitempty"` + Masquerade *InterfaceMasquerade `json:"masquerade,omitempty"` + SRIOV *InterfaceSRIOV `json:"sriov,omitempty"` + Macvtap *InterfaceMacvtap `json:"macvtap,omitempty"` + Passt *InterfacePasst `json:"passt,omitempty"` +} + +// InterfaceBridge connects to a given network via a linux bridge. +type InterfaceBridge struct{} + +// InterfaceSlirp connects to a given network using QEMU user networking mode. +type InterfaceSlirp struct{} + +// InterfaceMasquerade connects to a given network using netfilter rules to nat the traffic. +type InterfaceMasquerade struct{} + +// InterfaceSRIOV connects to a given network by passing-through an SR-IOV PCI device via vfio. +type InterfaceSRIOV struct{} + +// InterfaceMacvtap connects to a given network by extending the Kubernetes node's L2 networks via a macvtap interface. +type InterfaceMacvtap struct{} + +// InterfacePasst connects to a given network. +type InterfacePasst struct{} + +// Port represents a port to expose from the virtual machine. +// Default protocol TCP. +// The port field is mandatory +type Port struct { + // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + // named port in a pod must have a unique name. Name for the port that can be + // referred to by services. + // +optional + Name string `json:"name,omitempty"` + // Protocol for port. Must be UDP or TCP. + // Defaults to "TCP". + // +optional + Protocol string `json:"protocol,omitempty"` + // Number of port to expose for the virtual machine. + // This must be a valid port number, 0 < x < 65536. 
+	Port int32 `json:"port"`
+}
+
+type AccessCredentialSecretSource struct {
+	// SecretName represents the name of the secret in the VMI's namespace
+	SecretName string `json:"secretName"`
+}
+
+type ConfigDriveSSHPublicKeyAccessCredentialPropagation struct{}
+
+// AuthorizedKeysFile represents a path within the guest
+// that ssh public keys should be propagated to
+type AuthorizedKeysFile struct {
+	// FilePath represents the place on the guest that the authorized_keys
+	// file should be written to. This is expected to be a full path including
+	// both the base directory and file name.
+	FilePath string `json:"filePath"`
+}
+
+type QemuGuestAgentUserPasswordAccessCredentialPropagation struct{}
+
+type QemuGuestAgentSSHPublicKeyAccessCredentialPropagation struct {
+	// Users represents a list of guest users that should have the ssh public keys
+	// added to their authorized_keys file.
+	// +listType=set
+	Users []string `json:"users"`
+}
+
+// SSHPublicKeyAccessCredentialSource represents where to retrieve the ssh key
+// credentials
+// Only one of its members may be specified.
+type SSHPublicKeyAccessCredentialSource struct {
+	// Secret means that the access credential is pulled from a kubernetes secret
+	// +optional
+	Secret *AccessCredentialSecretSource `json:"secret,omitempty"`
+}
+
+// SSHPublicKeyAccessCredentialPropagationMethod represents the method used to
+// inject an ssh public key into the vm guest.
+// Only one of its members may be specified.
+type SSHPublicKeyAccessCredentialPropagationMethod struct {
+	// ConfigDrivePropagation means that the ssh public keys are injected
+	// into the VM via metadata using the configDrive cloud-init provider
+	// +optional
+	ConfigDrive *ConfigDriveSSHPublicKeyAccessCredentialPropagation `json:"configDrive,omitempty"`
+
+	// QemuGuestAgentAccessCredentialPropagation means ssh public keys are
+	// dynamically injected into the vm at runtime via the qemu guest agent.
+	// This feature requires the qemu guest agent to be running within the guest.
+	// +optional
+	QemuGuestAgent *QemuGuestAgentSSHPublicKeyAccessCredentialPropagation `json:"qemuGuestAgent,omitempty"`
+}
+
+// SSHPublicKeyAccessCredential represents a source and propagation method for
+// injecting ssh public keys into a vm guest
+type SSHPublicKeyAccessCredential struct {
+	// Source represents where the public keys are pulled from
+	Source SSHPublicKeyAccessCredentialSource `json:"source"`
+
+	// PropagationMethod represents how the public key is injected into the vm guest.
+	PropagationMethod SSHPublicKeyAccessCredentialPropagationMethod `json:"propagationMethod"`
+}
+
+// UserPasswordAccessCredentialSource represents where to retrieve the user password
+// credentials
+// Only one of its members may be specified.
+type UserPasswordAccessCredentialSource struct {
+	// Secret means that the access credential is pulled from a kubernetes secret
+	// +optional
+	Secret *AccessCredentialSecretSource `json:"secret,omitempty"`
+}
+
+// UserPasswordAccessCredentialPropagationMethod represents the method used to
+// inject user passwords into the vm guest.
+// Only one of its members may be specified.
+type UserPasswordAccessCredentialPropagationMethod struct {
+	// QemuGuestAgentAccessCredentialPropagation means passwords are
+	// dynamically injected into the vm at runtime via the qemu guest agent.
+	// This feature requires the qemu guest agent to be running within the guest.
+
+// UserPasswordAccessCredential represents a source and propagation method for
+// injecting user passwords into a vm guest
+// Only one of its members may be specified.
+type UserPasswordAccessCredential struct {
+	// Source represents where the user passwords are pulled from
+	Source UserPasswordAccessCredentialSource `json:"source"`
+
+	// PropagationMethod represents how the user passwords are injected into the vm guest.
+	PropagationMethod UserPasswordAccessCredentialPropagationMethod `json:"propagationMethod"`
+}
+
+// AccessCredential represents a credential source that can be used to
+// authorize remote access to the vm guest
+// Only one of its members may be specified.
+type AccessCredential struct {
+	// SSHPublicKey represents the source and method of applying a ssh public
+	// key into a guest virtual machine.
+	// +optional
+	SSHPublicKey *SSHPublicKeyAccessCredential `json:"sshPublicKey,omitempty"`
+	// UserPassword represents the source and method for applying a guest user's
+	// password
+	// +optional
+	UserPassword *UserPasswordAccessCredential `json:"userPassword,omitempty"`
+}
+
+// Network represents a network type and a resource that should be connected to the vm.
+type Network struct {
+	// Network name.
+	// Must be a DNS_LABEL and unique within the vm.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+	Name string `json:"name"`
+	// NetworkSource represents the network type and the source interface that should be connected to the virtual machine.
+	// Defaults to Pod, if no type is specified.
+	NetworkSource `json:",inline"`
+}
+
+// Represents the source resource that will be connected to the vm.
+// Only one of its members may be specified.
+type NetworkSource struct {
+	Pod    *PodNetwork    `json:"pod,omitempty"`
+	Multus *MultusNetwork `json:"multus,omitempty"`
+}
+
+// Represents the stock pod network interface.
+type PodNetwork struct {
+	// CIDR for vm network.
+	// Default 10.0.2.0/24 if not specified.
+	VMNetworkCIDR string `json:"vmNetworkCIDR,omitempty"`
+
+	// IPv6 CIDR for the vm network.
+	// Defaults to fd10:0:2::/120 if not specified.
+	VMIPv6NetworkCIDR string `json:"vmIPv6NetworkCIDR,omitempty"`
+}
+
+func (podNet *PodNetwork) UnmarshalJSON(data []byte) error {
+	type PodNetworkAlias PodNetwork
+	var podNetAlias PodNetworkAlias
+
+	if err := json.Unmarshal(data, &podNetAlias); err != nil {
+		return err
+	}
+
+	if sanitizedCIDR, err := sanitizeCIDR(podNetAlias.VMNetworkCIDR); err == nil {
+		podNetAlias.VMNetworkCIDR = sanitizedCIDR
+	}
+
+	*podNet = PodNetwork(podNetAlias)
+	return nil
+}
+
+// Rng represents the random device passed from host
+type Rng struct {
+}
+
+// Represents the multus cni network.
+type MultusNetwork struct {
+	// References to a NetworkAttachmentDefinition CRD object. Format:
+	// <networkName>, <namespace>/<networkName>. If namespace is not
+	// specified, VMI namespace is assumed.
+	NetworkName string `json:"networkName"`
+
+	// Select the default network and add it to the
+	// multus-cni.io/default-network annotation.
+	Default bool `json:"default,omitempty"`
+}
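// Editor's illustration, not part of the vendored file: a sketch of the two
// NetworkSource variants above, the default pod network plus a secondary
// Multus network referencing a hypothetical NetworkAttachmentDefinition
// "my-ns/my-net".
package main

import (
	"fmt"

	v1 "kubevirt.io/api/core/v1"
)

func main() {
	networks := []v1.Network{
		{Name: "default", NetworkSource: v1.NetworkSource{Pod: &v1.PodNetwork{}}},
		{Name: "secondary", NetworkSource: v1.NetworkSource{
			Multus: &v1.MultusNetwork{NetworkName: "my-ns/my-net"},
		}},
	}
	for _, n := range networks {
		fmt.Println(n.Name, "multus:", n.Multus != nil)
	}
}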
diff --git a/vendor/kubevirt.io/api/core/v1/schema_swagger_generated.go b/vendor/kubevirt.io/api/core/v1/schema_swagger_generated.go
new file mode 100644
index 000000000..5afc9bf97
--- /dev/null
+++ b/vendor/kubevirt.io/api/core/v1/schema_swagger_generated.go
@@ -0,0 +1,814 @@
+// Code generated by swagger-doc. DO NOT EDIT.
+
+package v1
+
+func (HostDisk) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "Represents a disk created on the cluster level",
+		"path": "The path to HostDisk image located on the cluster",
+		"type": "Contains information if disk.img exists or should be created\nallowed options are 'Disk' and 'DiskOrCreate'",
+		"capacity": "Capacity of the sparse disk\n+optional",
+		"shared": "Shared indicates whether the path is shared between nodes",
+	}
+}
+
+func (ConfigMapVolumeSource) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "ConfigMapVolumeSource adapts a ConfigMap into a volume.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes/#configmap",
+		"optional": "Specify whether the ConfigMap or its keys must be defined\n+optional",
+		"volumeLabel": "The volume label of the resulting disk inside the VMI.\nDifferent bootstrapping mechanisms require different values.\nTypical values are \"cidata\" (cloud-init), \"config-2\" (cloud-init) or \"OEMDRV\" (kickstart).\n+optional",
+	}
+}
+
+func (SecretVolumeSource) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "SecretVolumeSource adapts a Secret into a volume.",
+		"secretName": "Name of the secret in the pod's namespace to use.\nMore info: https://kubernetes.io/docs/concepts/storage/volumes#secret\n+optional",
+		"optional": "Specify whether the Secret or its keys must be defined\n+optional",
+		"volumeLabel": "The volume label of the resulting disk inside the VMI.\nDifferent bootstrapping mechanisms require different values.\nTypical values are \"cidata\" (cloud-init), \"config-2\" (cloud-init) or \"OEMDRV\" (kickstart).\n+optional",
+	}
+}
+
+func (DownwardAPIVolumeSource) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "DownwardAPIVolumeSource represents a volume containing downward API info.",
+		"fields": "Fields is a list of downward API volume files\n+optional",
+		"volumeLabel": "The volume label of the resulting disk inside the VMI.\nDifferent bootstrapping mechanisms require different values.\nTypical values are \"cidata\" (cloud-init), \"config-2\" (cloud-init) or \"OEMDRV\" (kickstart).\n+optional",
+	}
+}
+
+func (ServiceAccountVolumeSource) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "ServiceAccountVolumeSource adapts a ServiceAccount into a volume.",
+		"serviceAccountName": "Name of the service account in the pod's namespace to use.\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/",
+	}
+}
+
+func (DownwardMetricsVolumeSource) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "DownwardMetricsVolumeSource adds a very small disk to VMIs which contains a limited view of host and guest\nmetrics. The disk content is compatible with vhostmd (https://github.com/vhostmd/vhostmd) and vm-dump-metrics.",
+	}
+}
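// Editor's illustration, not part of the vendored file: how these generated
// lookup tables are typically consumed. The empty key holds the struct-level
// description; field keys hold per-field descriptions.
package main

import (
	"fmt"

	v1 "kubevirt.io/api/core/v1"
)

func main() {
	docs := v1.HostDisk{}.SwaggerDoc()
	fmt.Println(docs[""])     // struct-level doc
	fmt.Println(docs["path"]) // doc for the "path" field
}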
+
+func (SysprepSource) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "Represents a Sysprep volume source.",
+		"secret": "Secret references a k8s Secret that contains the Sysprep answer file named autounattend.xml that should be attached as a disk of CDROM type.\n+ optional",
+		"configMap": "ConfigMap references a ConfigMap that contains the Sysprep answer file named autounattend.xml that should be attached as a disk of CDROM type.\n+ optional",
+	}
+}
+
+func (CloudInitNoCloudSource) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "Represents a cloud-init nocloud user data source.\nMore info: http://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html",
+		"secretRef": "UserDataSecretRef references a k8s secret that contains NoCloud userdata.\n+ optional",
+		"userDataBase64": "UserDataBase64 contains NoCloud cloud-init userdata as a base64 encoded string.\n+ optional",
+		"userData": "UserData contains NoCloud inline cloud-init userdata.\n+ optional",
+		"networkDataSecretRef": "NetworkDataSecretRef references a k8s secret that contains NoCloud networkdata.\n+ optional",
+		"networkDataBase64": "NetworkDataBase64 contains NoCloud cloud-init networkdata as a base64 encoded string.\n+ optional",
+		"networkData": "NetworkData contains NoCloud inline cloud-init networkdata.\n+ optional",
+	}
+}
+
+func (CloudInitConfigDriveSource) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "Represents a cloud-init config drive user data source.\nMore info: https://cloudinit.readthedocs.io/en/latest/topics/datasources/configdrive.html",
+		"secretRef": "UserDataSecretRef references a k8s secret that contains config drive userdata.\n+ optional",
+		"userDataBase64": "UserDataBase64 contains config drive cloud-init userdata as a base64 encoded string.\n+ optional",
+		"userData": "UserData contains config drive inline cloud-init userdata.\n+ optional",
+		"networkDataSecretRef": "NetworkDataSecretRef references a k8s secret that contains config drive networkdata.\n+ optional",
+		"networkDataBase64": "NetworkDataBase64 contains config drive cloud-init networkdata as a base64 encoded string.\n+ optional",
+		"networkData": "NetworkData contains config drive inline cloud-init networkdata.\n+ optional",
+	}
+}
+
+func (DomainSpec) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"resources": "Resources describes the Compute Resources required by this vmi.",
+		"cpu": "CPU allows specifying the detailed CPU topology inside the vmi.\n+optional",
+		"memory": "Memory allows specifying the VMI memory features.\n+optional",
+		"machine": "Machine type.\n+optional",
+		"firmware": "Firmware.\n+optional",
+		"clock": "Clock sets the clock and timers of the vmi.\n+optional",
+		"features": "Features like acpi, apic, hyperv, smm.\n+optional",
+		"devices": "Devices allows adding disks, network interfaces, and others",
+		"ioThreadsPolicy": "Controls whether or not disks will share IOThreads.\nOmitting IOThreadsPolicy disables use of IOThreads.\nOne of: shared, auto\n+optional",
+		"chassis": "Chassis specifies the chassis info passed to the domain.\n+optional",
+		"launchSecurity": "Launch Security setting of the vmi.\n+optional",
+	}
+}
+
+func (Chassis) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "Chassis specifies the chassis info passed to the domain.",
+	}
+}
+
+func (Bootloader) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "Represents the firmware blob used to assist in the domain creation process.\nUsed for setting the QEMU BIOS file path for the libvirt domain.",
+		"bios": "If set (default), BIOS will be used.\n+optional",
+		"efi": "If set, EFI will be used instead of BIOS.\n+optional",
+	}
+}
+
+func (BIOS) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "If set (default), BIOS will be used.",
+		"useSerial": "If set, the BIOS output will be transmitted over serial\n+optional",
+	}
+}
+
+func (EFI) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "If set, EFI will be used instead of BIOS.",
+		"secureBoot": "If set, SecureBoot will be enabled and the OVMF roms will be swapped for\nSecureBoot-enabled ones.\nRequires SMM to be enabled.\nDefaults to true\n+optional",
+	}
+}
+
+func (KernelBootContainer) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "If set, the VM will be booted from the defined kernel / initrd.",
+		"image": "Image that contains initrd / kernel files.",
+		"imagePullSecret": "ImagePullSecret is the name of the Docker registry secret required to pull the image. The secret must already exist.\n+optional",
+		"imagePullPolicy": "Image pull policy.\nOne of Always, Never, IfNotPresent.\nDefaults to Always if :latest tag is specified, or IfNotPresent otherwise.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/containers/images#updating-images\n+optional",
+		"kernelPath": "The fully-qualified path to the kernel image in the host OS\n+optional",
+		"initrdPath": "the fully-qualified path to the ramdisk image in the host OS\n+optional",
+	}
+}
+
+func (KernelBoot) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "Represents the firmware blob used to assist in the kernel boot process.\nUsed for setting the kernel, initrd and command line arguments",
+		"kernelArgs": "Arguments to be passed to the kernel at boot time",
+		"container": "Container defines the container that contains kernel artifacts",
+	}
+}
+
+func (ResourceRequirements) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"requests": "Requests is a description of the initial vmi resources.\nValid resource keys are \"memory\" and \"cpu\".\n+optional",
+		"limits": "Limits describes the maximum amount of compute resources allowed.\nValid resource keys are \"memory\" and \"cpu\".\n+optional",
+		"overcommitGuestOverhead": "Don't ask the scheduler to take the guest-management overhead into account. Instead\nput the overhead only into the container's memory limit. This can lead to crashes if\nall memory is in use on a node. Defaults to false.",
+	}
+}
Defaults to false.", + } +} + +func (CPU) SwaggerDoc() map[string]string { + return map[string]string{ + "": "CPU allows specifying the CPU topology.", + "cores": "Cores specifies the number of cores inside the vmi.\nMust be a value greater or equal 1.", + "sockets": "Sockets specifies the number of sockets inside the vmi.\nMust be a value greater or equal 1.", + "threads": "Threads specifies the number of threads inside the vmi.\nMust be a value greater or equal 1.", + "model": "Model specifies the CPU model inside the VMI.\nList of available models https://github.com/libvirt/libvirt/tree/master/src/cpu_map.\nIt is possible to specify special cases like \"host-passthrough\" to get the same CPU as the node\nand \"host-model\" to get CPU closest to the node one.\nDefaults to host-model.\n+optional", + "features": "Features specifies the CPU features list inside the VMI.\n+optional", + "dedicatedCpuPlacement": "DedicatedCPUPlacement requests the scheduler to place the VirtualMachineInstance on a node\nwith enough dedicated pCPUs and pin the vCPUs to it.\n+optional", + "numa": "NUMA allows specifying settings for the guest NUMA topology\n+optional", + "isolateEmulatorThread": "IsolateEmulatorThread requests one more dedicated pCPU to be allocated for the VMI to place\nthe emulator thread on it.\n+optional", + "realtime": "Realtime instructs the virt-launcher to tune the VMI for lower latency, optional for real time workloads\n+optional", + } +} + +func (Realtime) SwaggerDoc() map[string]string { + return map[string]string{ + "": "Realtime holds the tuning knobs specific for realtime workloads.", + "mask": "Mask defines the vcpu mask expression that defines which vcpus are used for realtime. Format matches libvirt's expressions.\nExample: \"0-3,^1\",\"0,2,3\",\"2-3\"\n+optional", + } +} + +func (NUMAGuestMappingPassthrough) SwaggerDoc() map[string]string { + return map[string]string{ + "": "NUMAGuestMappingPassthrough instructs kubevirt to model numa topology which is compatible with the CPU pinning on the guest.\nThis will result in a subset of the node numa topology being passed through, ensuring that virtual numa nodes and their memory\nnever cross boundaries coming from the node numa mapping.", + } +} + +func (NUMA) SwaggerDoc() map[string]string { + return map[string]string{ + "guestMappingPassthrough": "GuestMappingPassthrough will create an efficient guest topology based on host CPUs exclusively assigned to a pod.\nThe created topology ensures that memory and CPUs on the virtual numa nodes never cross boundaries of host numa nodes.\n+opitonal", + } +} + +func (CPUFeature) SwaggerDoc() map[string]string { + return map[string]string{ + "": "CPUFeature allows specifying a CPU feature.", + "name": "Name of the CPU feature", + "policy": "Policy is the CPU feature attribute which can have the following attributes:\nforce - The virtual CPU will claim the feature is supported regardless of it being supported by host CPU.\nrequire - Guest creation will fail unless the feature is supported by the host CPU or the hypervisor is able to emulate it.\noptional - The feature will be supported by virtual CPU if and only if it is supported by host CPU.\ndisable - The feature will not be supported by virtual CPU.\nforbid - Guest creation will fail if the feature is supported by host CPU.\nDefaults to require\n+optional", + } +} + +func (Memory) SwaggerDoc() map[string]string { + return map[string]string{ + "": "Memory allows specifying the VirtualMachineInstance memory features.", + "hugepages": 
"Hugepages allow to use hugepages for the VirtualMachineInstance instead of regular memory.\n+optional", + "guest": "Guest allows to specifying the amount of memory which is visible inside the Guest OS.\nThe Guest must lie between Requests and Limits from the resources section.\nDefaults to the requested memory in the resources section if not specified.\n+ optional", + } +} + +func (Hugepages) SwaggerDoc() map[string]string { + return map[string]string{ + "": "Hugepages allow to use hugepages for the VirtualMachineInstance instead of regular memory.", + "pageSize": "PageSize specifies the hugepage size, for x86_64 architecture valid values are 1Gi and 2Mi.", + } +} + +func (Machine) SwaggerDoc() map[string]string { + return map[string]string{ + "type": "QEMU machine type is the actual chipset of the VirtualMachineInstance.\n+optional", + } +} + +func (Firmware) SwaggerDoc() map[string]string { + return map[string]string{ + "uuid": "UUID reported by the vmi bios.\nDefaults to a random generated uid.", + "bootloader": "Settings to control the bootloader that is used.\n+optional", + "serial": "The system-serial-number in SMBIOS", + "kernelBoot": "Settings to set the kernel for booting.\n+optional", + } +} + +func (Devices) SwaggerDoc() map[string]string { + return map[string]string{ + "useVirtioTransitional": "Fall back to legacy virtio 0.9 support if virtio bus is selected on devices.\nThis is helpful for old machines like CentOS6 or RHEL6 which\ndo not understand virtio_non_transitional (virtio 1.0).", + "disableHotplug": "DisableHotplug disabled the ability to hotplug disks.", + "disks": "Disks describes disks, cdroms and luns which are connected to the vmi.", + "watchdog": "Watchdog describes a watchdog device which can be added to the vmi.", + "interfaces": "Interfaces describe network interfaces which are added to the vmi.", + "inputs": "Inputs describe input devices", + "autoattachPodInterface": "Whether to attach a pod network interface. Defaults to true.", + "autoattachGraphicsDevice": "Whether to attach the default graphics device or not.\nVNC will not be available if set to false. Defaults to true.", + "autoattachSerialConsole": "Whether to attach the default serial console or not.\nSerial console access will not be available if set to false. Defaults to true.", + "autoattachMemBalloon": "Whether to attach the Memory balloon device with default period.\nPeriod can be adjusted in virt-config.\nDefaults to true.\n+optional", + "autoattachInputDevice": "Whether to attach an Input Device.\nDefaults to false.\n+optional", + "autoattachVSOCK": "Whether to attach the VSOCK CID to the VM or not.\nVSOCK access will be available if set to true. Defaults to false.", + "rng": "Whether to have random number generator from host\n+optional", + "blockMultiQueue": "Whether or not to enable virtio multi-queue for block devices.\nDefaults to false.\n+optional", + "networkInterfaceMultiqueue": "If specified, virtual network interfaces configured with a virtio bus will also enable the vhost multiqueue feature for network devices. 
+		"gpus": "Whether to attach a GPU device to the vmi.\n+optional\n+listType=atomic",
+		"filesystems": "Filesystems describes filesystems which are connected to the vmi.\n+optional\n+listType=atomic",
+		"hostDevices": "Whether to attach a host device to the vmi.\n+optional\n+listType=atomic",
+		"clientPassthrough": "To configure and access client devices such as redirecting USB\n+optional",
+		"sound": "Whether to emulate a sound device.\n+optional",
+		"tpm": "Whether to emulate a TPM device.\n+optional",
+	}
+}
+
+func (ClientPassthroughDevices) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "Represents a subset of client devices that can be accessed by VMI. At the\nmoment, only USB devices using Usbredir's library and tooling. Another fit\nwould be a smartcard with libcacard.\n\nThe struct is currently empty as there is no immediate request for\nuser-facing APIs. This structure simply turns on USB redirection of\nUsbClientPassthroughMaxNumberOf devices.",
+	}
+}
+
+func (SoundDevice) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "Represents the user's configuration to emulate sound cards in the VMI.",
+		"name": "User's defined name for this sound device",
+		"model": "We only support ich9 or ac97.\nIf SoundDevice is not set: No sound card is emulated.\nIf SoundDevice is set but Model is not: ich9\n+optional",
+	}
+}
+
+func (TPMDevice) SwaggerDoc() map[string]string {
+	return map[string]string{}
+}
+
+func (Input) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"bus": "Bus indicates the bus of input device to emulate.\nSupported values: virtio, usb.",
+		"type": "Type indicates the type of input device.\nSupported values: tablet.",
+		"name": "Name is the device name",
+	}
+}
+
+func (Filesystem) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"name": "Name is the device name",
+		"virtiofs": "Virtiofs is supported",
+	}
+}
+
+func (FilesystemVirtiofs) SwaggerDoc() map[string]string {
+	return map[string]string{}
+}
+
+func (GPU) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"name": "Name of the GPU device as exposed by a device plugin",
+		"tag": "If specified, the virtual network interface address and its tag will be provided to the guest via config drive\n+optional",
+	}
+}
+
+func (VGPUOptions) SwaggerDoc() map[string]string {
+	return map[string]string{}
+}
+
+func (VGPUDisplayOptions) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"enabled": "Enabled determines if a display adapter backed by a vGPU should be enabled or disabled on the guest.\nDefaults to true.\n+optional",
+		"ramFB": "Enables a boot framebuffer, until the guest OS loads a real GPU driver\nDefaults to true.\n+optional",
+	}
+}
+
+func (HostDevice) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"deviceName": "DeviceName is the resource name of the host device exposed by a device plugin",
+		"tag": "If specified, the virtual network interface address and its tag will be provided to the guest via config drive\n+optional",
+	}
+}
+
+func (Disk) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"name": "Name is the device name",
+		"bootOrder": "BootOrder is an integer value > 0, used to determine ordering of boot devices.\nLower values take precedence.\nEach disk or interface that has a boot order must have a unique value.\nDisks without a boot order are not tried if a disk with a boot order exists.\n+optional",
+		"serial": "Serial provides the ability to specify a serial number for the disk device.\n+optional",
+		"dedicatedIOThread": "dedicatedIOThread indicates this disk should have an exclusive IO Thread.\nEnabling this implies useIOThreads = true.\nDefaults to false.\n+optional",
+		"cache": "Cache specifies which kvm disk cache mode should be used.\nSupported values are: CacheNone, CacheWriteThrough.\n+optional",
+		"io": "IO specifies which QEMU disk IO mode should be used.\nSupported values are: native, default, threads.\n+optional",
+		"tag": "If specified, disk address and its tag will be provided to the guest via config drive metadata\n+optional",
+		"blockSize": "If specified, the virtual disk will be presented with the given block sizes.\n+optional",
+		"shareable": "If specified, the disk is made shareable and multiple writes from different VMs are permitted\n+optional",
+	}
+}
+
+func (CustomBlockSize) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "CustomBlockSize represents the desired logical and physical block size for a VM disk.",
+	}
+}
+
+func (BlockSize) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "BlockSize provides the option to change the block size presented to the VM for a disk.\nOnly one of its members may be specified.",
+	}
+}
+
+func (DiskDevice) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "Represents the target of a volume to mount.\nOnly one of its members may be specified.",
+		"disk": "Attach a volume as a disk to the vmi.",
+		"lun": "Attach a volume as a LUN to the vmi.",
+		"cdrom": "Attach a volume as a cdrom to the vmi.",
+	}
+}
+
+func (DiskTarget) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"bus": "Bus indicates the type of disk device to emulate.\nsupported values: virtio, sata, scsi, usb.",
+		"readonly": "ReadOnly.\nDefaults to false.",
+		"pciAddress": "If specified, the virtual disk will be placed at the specified PCI address on the guest. For example: 0000:81:01.10\n+optional",
+	}
+}
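// Editor's illustration, not part of the vendored file: a sketch of a disk
// definition using the DiskDevice union documented above; exactly one target
// member (disk, lun, or cdrom) is set. Assumes the Disk type defined
// elsewhere in this package, which embeds DiskDevice inline.
package main

import (
	"fmt"

	v1 "kubevirt.io/api/core/v1"
)

func main() {
	disk := v1.Disk{
		Name: "rootdisk",
		DiskDevice: v1.DiskDevice{
			// Only the Disk target is set; Lun and CDRom stay nil.
			Disk: &v1.DiskTarget{Bus: "virtio"},
		},
	}
	fmt.Println(disk.Name, disk.Disk.Bus)
}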
+
+func (LaunchSecurity) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"sev": "AMD Secure Encrypted Virtualization (SEV).",
+	}
+}
+
+func (SEV) SwaggerDoc() map[string]string {
+	return map[string]string{}
+}
+
+func (LunTarget) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"bus": "Bus indicates the type of disk device to emulate.\nsupported values: virtio, sata, scsi.",
+		"readonly": "ReadOnly.\nDefaults to false.",
+	}
+}
+
+func (CDRomTarget) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"bus": "Bus indicates the type of disk device to emulate.\nsupported values: virtio, sata, scsi.",
+		"readonly": "ReadOnly.\nDefaults to true.",
+		"tray": "Tray indicates if the tray of the device is open or closed.\nAllowed values are \"open\" and \"closed\".\nDefaults to closed.\n+optional",
+	}
+}
+
+func (Volume) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "Volume represents a named volume in a vmi.",
+		"name": "Volume's name.\nMust be a DNS_LABEL and unique within the vmi.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
+	}
+}
+
+func (VolumeSource) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "Represents the source of a volume to mount.\nOnly one of its members may be specified.",
+		"hostDisk": "HostDisk represents a disk created on the cluster level\n+optional",
+		"persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace.\nDirectly attached to the vmi via qemu.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims\n+optional",
+		"cloudInitNoCloud": "CloudInitNoCloud represents a cloud-init NoCloud user-data source.\nThe NoCloud data will be added as a disk to the vmi. A proper cloud-init installation is required inside the guest.\nMore info: http://cloudinit.readthedocs.io/en/latest/topics/datasources/nocloud.html\n+optional",
+		"cloudInitConfigDrive": "CloudInitConfigDrive represents a cloud-init Config Drive user-data source.\nThe Config Drive data will be added as a disk to the vmi. A proper cloud-init installation is required inside the guest.\nMore info: https://cloudinit.readthedocs.io/en/latest/topics/datasources/configdrive.html\n+optional",
+		"sysprep": "Represents a Sysprep volume source.\n+optional",
+		"containerDisk": "ContainerDisk references a docker image, embedding a qcow or raw disk.\nMore info: https://kubevirt.gitbooks.io/user-guide/registry-disk.html\n+optional",
+		"ephemeral": "Ephemeral is a special volume source that \"wraps\" specified source and provides copy-on-write image on top of it.\n+optional",
+		"emptyDisk": "EmptyDisk represents a temporary disk which shares the vmi's lifecycle.\nMore info: https://kubevirt.gitbooks.io/user-guide/disks-and-volumes.html\n+optional",
+		"dataVolume": "DataVolume represents the dynamic creation of a PVC for this volume as well as\nthe process of populating that PVC with a disk image.\n+optional",
+		"configMap": "ConfigMapSource represents a reference to a ConfigMap in the same namespace.\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/\n+optional",
+		"secret": "SecretVolumeSource represents a reference to a secret data in the same namespace.\nMore info: https://kubernetes.io/docs/concepts/configuration/secret/\n+optional",
+		"downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume\n+optional",
+		"serviceAccount": "ServiceAccountVolumeSource represents a reference to a service account.\nThere can only be one volume of this type!\nMore info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/\n+optional",
+		"downwardMetrics": "DownwardMetrics adds a very small disk to VMIs which contains a limited view of host and guest\nmetrics. The disk content is compatible with vhostmd (https://github.com/vhostmd/vhostmd) and vm-dump-metrics.",
+		"memoryDump": "MemoryDump is attached to the virt launcher and is populated with a memory dump of the vmi",
+	}
+}
+
+func (HotplugVolumeSource) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "HotplugVolumeSource represents the source of a volume to mount which is capable\nof being hotplugged on a live running VMI.\nOnly one of its members may be specified.",
+		"persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace.\nDirectly attached to the vmi via qemu.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims\n+optional",
+		"dataVolume": "DataVolume represents the dynamic creation of a PVC for this volume as well as\nthe process of populating that PVC with a disk image.\n+optional",
+	}
+}
+
+func (DataVolumeSource) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"name": "Name of both the DataVolume and the PVC in the same namespace.\nAfter PVC population the DataVolume is garbage collected by default.",
+		"hotpluggable": "Hotpluggable indicates whether the volume can be hotplugged and hotunplugged.\n+optional",
+	}
+}
+
+func (PersistentVolumeClaimVolumeSource) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace.\nDirectly attached to the vmi via qemu.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
+		"hotpluggable": "Hotpluggable indicates whether the volume can be hotplugged and hotunplugged.\n+optional",
+	}
+}
+
+func (MemoryDumpVolumeSource) SwaggerDoc() map[string]string {
+	return map[string]string{}
+}
+
+func (EphemeralVolumeSource) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace.\nDirectly attached to the vmi via qemu.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims\n+optional",
+	}
+}
+
+func (EmptyDiskSource) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "EmptyDisk represents a temporary disk which shares the vmi's lifecycle.",
+		"capacity": "Capacity of the sparse disk.",
+	}
+}
+
+func (ContainerDiskSource) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "Represents a docker image with an embedded disk.",
+		"image": "Image is the name of the image with the embedded disk.",
+		"imagePullSecret": "ImagePullSecret is the name of the Docker registry secret required to pull the image. The secret must already exist.",
+		"path": "Path defines the path to disk file in the container",
+		"imagePullPolicy": "Image pull policy.\nOne of Always, Never, IfNotPresent.\nDefaults to Always if :latest tag is specified, or IfNotPresent otherwise.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/containers/images#updating-images\n+optional",
+	}
+}
+
+func (ClockOffset) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "Exactly one of its members must be set.",
+		"utc": "UTC sets the guest clock to UTC on each boot. If an offset is specified,\nguest changes to the clock will be kept during reboots and are not reset.",
+		"timezone": "Timezone sets the guest clock to the specified timezone.\nZone name follows the TZ environment variable format (e.g. 'America/New_York').",
+	}
+}
+
+func (ClockOffsetUTC) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "UTC sets the guest clock to UTC on each boot.",
+		"offsetSeconds": "OffsetSeconds specifies an offset in seconds, relative to UTC. If set,\nguest changes to the clock will be kept during reboots and not reset.",
+	}
+}
+
+func (Clock) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "Represents the clock and timers of a vmi.\n+kubebuilder:pruning:PreserveUnknownFields",
+		"timer": "Timer specifies which timers are attached to the vmi.\n+optional",
+	}
+}
+
+func (Timer) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "Represents all available timers in a vmi.",
+		"hpet": "HPET (High Precision Event Timer) - multiple timers with periodic interrupts.",
+		"kvm": "KVM \t(KVM clock) - lets guests read the host’s wall clock time (paravirtualized). For linux guests.",
+		"pit": "PIT (Programmable Interval Timer) - a timer with periodic interrupts.",
+		"rtc": "RTC (Real Time Clock) - a continuously running timer with periodic interrupts.",
+		"hyperv": "Hyperv (Hypervclock) - lets guests read the host’s wall clock time (paravirtualized). For windows guests.",
+	}
+}
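// Editor's illustration, not part of the vendored file: a hedged sketch of
// the ClockOffset union documented above, setting the guest clock to a fixed
// timezone. Assumes the Clock type and the ClockOffsetTimezone string type
// defined elsewhere in this package.
package main

import (
	"fmt"

	v1 "kubevirt.io/api/core/v1"
)

func main() {
	tz := v1.ClockOffsetTimezone("America/New_York")
	clock := v1.Clock{
		// Exactly one ClockOffset member must be set: UTC or Timezone.
		ClockOffset: v1.ClockOffset{Timezone: &tz},
	}
	fmt.Println(*clock.Timezone)
}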
+
+func (RTCTimer) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"tickPolicy": "TickPolicy determines what happens when QEMU misses a deadline for injecting a tick to the guest.\nOne of \"delay\", \"catchup\".",
+		"present": "Enabled set to false makes sure that the machine type or a preset can't add the timer.\nDefaults to true.\n+optional",
+		"track": "Track the guest or the wall clock.",
+	}
+}
+
+func (HPETTimer) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"tickPolicy": "TickPolicy determines what happens when QEMU misses a deadline for injecting a tick to the guest.\nOne of \"delay\", \"catchup\", \"merge\", \"discard\".",
+		"present": "Enabled set to false makes sure that the machine type or a preset can't add the timer.\nDefaults to true.\n+optional",
+	}
+}
+
+func (PITTimer) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"tickPolicy": "TickPolicy determines what happens when QEMU misses a deadline for injecting a tick to the guest.\nOne of \"delay\", \"catchup\", \"discard\".",
+		"present": "Enabled set to false makes sure that the machine type or a preset can't add the timer.\nDefaults to true.\n+optional",
+	}
+}
+
+func (KVMTimer) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"present": "Enabled set to false makes sure that the machine type or a preset can't add the timer.\nDefaults to true.\n+optional",
+	}
+}
+
+func (HypervTimer) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"present": "Enabled set to false makes sure that the machine type or a preset can't add the timer.\nDefaults to true.\n+optional",
+	}
+}
+
+func (Features) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"acpi": "ACPI enables/disables ACPI inside the guest.\nDefaults to enabled.\n+optional",
+		"apic": "Defaults to the machine type setting.\n+optional",
+		"hyperv": "Defaults to the machine type setting.\n+optional",
+		"smm": "SMM enables/disables System Management Mode.\nTSEG not yet implemented.\n+optional",
+		"kvm": "Configure how KVM presence is exposed to the guest.\n+optional",
+		"pvspinlock": "Notify the guest that the host supports paravirtual spinlocks.\nFor older kernels this feature should be explicitly disabled.\n+optional",
+	}
+}
+
+func (SyNICTimer) SwaggerDoc() map[string]string {
+	return map[string]string{}
+}
+
+func (FeatureState) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "Represents if a feature is enabled or disabled.",
+		"enabled": "Enabled determines if the feature should be enabled or disabled on the guest.\nDefaults to true.\n+optional",
+	}
+}
+
+func (FeatureAPIC) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"enabled": "Enabled determines if the feature should be enabled or disabled on the guest.\nDefaults to true.\n+optional",
+		"endOfInterrupt": "EndOfInterrupt enables the end of interrupt notification in the guest.\nDefaults to false.\n+optional",
+	}
+}
+
+func (FeatureSpinlocks) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"enabled": "Enabled determines if the feature should be enabled or disabled on the guest.\nDefaults to true.\n+optional",
+		"spinlocks": "Retries indicates the number of retries.\nMust be a value greater or equal 4096.\nDefaults to 4096.\n+optional",
+	}
+}
+
+func (FeatureVendorID) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"enabled": "Enabled determines if the feature should be enabled or disabled on the guest.\nDefaults to true.\n+optional",
"vendorid": "VendorID sets the hypervisor vendor id, visible to the vmi.\nString up to twelve characters.", + } +} + +func (FeatureHyperv) SwaggerDoc() map[string]string { + return map[string]string{ + "": "Hyperv specific features.", + "relaxed": "Relaxed instructs the guest OS to disable watchdog timeouts.\nDefaults to the machine type setting.\n+optional", + "vapic": "VAPIC improves the paravirtualized handling of interrupts.\nDefaults to the machine type setting.\n+optional", + "spinlocks": "Spinlocks allows to configure the spinlock retry attempts.\n+optional", + "vpindex": "VPIndex enables the Virtual Processor Index to help windows identifying virtual processors.\nDefaults to the machine type setting.\n+optional", + "runtime": "Runtime improves the time accounting to improve scheduling in the guest.\nDefaults to the machine type setting.\n+optional", + "synic": "SyNIC enables the Synthetic Interrupt Controller.\nDefaults to the machine type setting.\n+optional", + "synictimer": "SyNICTimer enables Synthetic Interrupt Controller Timers, reducing CPU load.\nDefaults to the machine type setting.\n+optional", + "reset": "Reset enables Hyperv reboot/reset for the vmi. Requires synic.\nDefaults to the machine type setting.\n+optional", + "vendorid": "VendorID allows setting the hypervisor vendor id.\nDefaults to the machine type setting.\n+optional", + "frequencies": "Frequencies improves the TSC clock source handling for Hyper-V on KVM.\nDefaults to the machine type setting.\n+optional", + "reenlightenment": "Reenlightenment enables the notifications on TSC frequency changes.\nDefaults to the machine type setting.\n+optional", + "tlbflush": "TLBFlush improves performances in overcommited environments. Requires vpindex.\nDefaults to the machine type setting.\n+optional", + "ipi": "IPI improves performances in overcommited environments. Requires vpindex.\nDefaults to the machine type setting.\n+optional", + "evmcs": "EVMCS Speeds up L2 vmexits, but disables other virtualization features. Requires vapic.\nDefaults to the machine type setting.\n+optional", + } +} + +func (FeatureKVM) SwaggerDoc() map[string]string { + return map[string]string{ + "hidden": "Hide the KVM hypervisor from standard MSR based discovery.\nDefaults to false", + } +} + +func (Watchdog) SwaggerDoc() map[string]string { + return map[string]string{ + "": "Named watchdog device.", + "name": "Name of the watchdog.", + } +} + +func (WatchdogDevice) SwaggerDoc() map[string]string { + return map[string]string{ + "": "Hardware watchdog device.\nExactly one of its members must be set.", + "i6300esb": "i6300esb watchdog device.\n+optional", + } +} + +func (I6300ESBWatchdog) SwaggerDoc() map[string]string { + return map[string]string{ + "": "i6300esb watchdog device.", + "action": "The action to take. Valid values are poweroff, reset, shutdown.\nDefaults to reset.", + } +} + +func (Interface) SwaggerDoc() map[string]string { + return map[string]string{ + "name": "Logical name of the interface as well as a reference to the associated networks.\nMust match the Name of a Network.", + "model": "Interface model.\nOne of: e1000, e1000e, ne2k_pci, pcnet, rtl8139, virtio.\nDefaults to virtio.", + "ports": "List of ports to be forwarded to the virtual machine.", + "macAddress": "Interface MAC address. 
+		"bootOrder": "BootOrder is an integer value > 0, used to determine ordering of boot devices.\nLower values take precedence.\nEach interface or disk that has a boot order must have a unique value.\nInterfaces without a boot order are not tried.\n+optional",
+		"pciAddress": "If specified, the virtual network interface will be placed at the specified PCI address on the guest. For example: 0000:81:01.10\n+optional",
+		"dhcpOptions": "If specified the network interface will pass additional DHCP options to the VMI\n+optional",
+		"tag": "If specified, the virtual network interface address and its tag will be provided to the guest via config drive\n+optional",
+		"acpiIndex": "If specified, the ACPI index is used to provide network interface device naming, that is stable across changes\nin PCI addresses assigned to the device.\nThis value is required to be unique across all devices and be between 1 and (16*1024-1).\n+optional",
+	}
+}
+
+func (DHCPOptions) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "Extra DHCP options to use in the interface.",
+		"bootFileName": "If specified, will pass option 67 to the interface's DHCP server\n+optional",
+		"tftpServerName": "If specified, will pass option 66 to the interface's DHCP server\n+optional",
+		"ntpServers": "If specified, will pass the configured NTP server to the VM via DHCP option 042.\n+optional",
+		"privateOptions": "If specified, will pass extra DHCP options for private use, range: 224-254\n+optional",
+	}
+}
+
+func (DHCPPrivateOptions) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "DHCPPrivateOptions defines extra DHCP options for a VM.",
+		"option": "Option is an Integer value from 224-254\nRequired.",
+		"value": "Value is a String value for the Option provided\nRequired.",
+	}
+}
+
+func (InterfaceBindingMethod) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "Represents the method which will be used to connect the interface to the guest.\nOnly one of its members may be specified.",
+	}
+}
+
+func (InterfaceBridge) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "InterfaceBridge connects to a given network via a linux bridge.",
+	}
+}
+
+func (InterfaceSlirp) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "InterfaceSlirp connects to a given network using QEMU user networking mode.",
+	}
+}
+
+func (InterfaceMasquerade) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "InterfaceMasquerade connects to a given network using netfilter rules to NAT the traffic.",
+	}
+}
+
+func (InterfaceSRIOV) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "InterfaceSRIOV connects to a given network by passing-through an SR-IOV PCI device via vfio.",
+	}
+}
+
+func (InterfaceMacvtap) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "InterfaceMacvtap connects to a given network by extending the Kubernetes node's L2 networks via a macvtap interface.",
+	}
+}
+
+func (InterfacePasst) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "InterfacePasst connects to a given network.",
+	}
+}
+
+func (Port) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "Port represents a port to expose from the virtual machine.\nDefault protocol TCP.\nThe port field is mandatory.",
+		"name": "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each\nnamed port in a pod must have a unique name. Name for the port that can be\nreferred to by services.\n+optional",
+		"protocol": "Protocol for port. Must be UDP or TCP.\nDefaults to \"TCP\".\n+optional",
+		"port": "Number of port to expose for the virtual machine.\nThis must be a valid port number, 0 < x < 65536.",
+	}
+}
+
+func (AccessCredentialSecretSource) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"secretName": "SecretName represents the name of the secret in the VMI's namespace",
+	}
+}
+
+func (ConfigDriveSSHPublicKeyAccessCredentialPropagation) SwaggerDoc() map[string]string {
+	return map[string]string{}
+}
+
+func (AuthorizedKeysFile) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "AuthorizedKeysFile represents a path within the guest\nthat ssh public keys should be propagated to",
+		"filePath": "FilePath represents the place on the guest that the authorized_keys\nfile should be written to. This is expected to be a full path including\nboth the base directory and file name.",
+	}
+}
+
+func (QemuGuestAgentUserPasswordAccessCredentialPropagation) SwaggerDoc() map[string]string {
+	return map[string]string{}
+}
+
+func (QemuGuestAgentSSHPublicKeyAccessCredentialPropagation) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"users": "Users represents a list of guest users that should have the ssh public keys\nadded to their authorized_keys file.\n+listType=set",
+	}
+}
+
+func (SSHPublicKeyAccessCredentialSource) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "SSHPublicKeyAccessCredentialSource represents where to retrieve the ssh key\ncredentials\nOnly one of its members may be specified.",
+		"secret": "Secret means that the access credential is pulled from a kubernetes secret\n+optional",
+	}
+}
+
+func (SSHPublicKeyAccessCredentialPropagationMethod) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "SSHPublicKeyAccessCredentialPropagationMethod represents the method used to\ninject an ssh public key into the vm guest.\nOnly one of its members may be specified.",
+		"configDrive": "ConfigDrivePropagation means that the ssh public keys are injected\ninto the VM metadata using the configDrive cloud-init provider\n+optional",
+		"qemuGuestAgent": "QemuGuestAgentAccessCredentialPropagation means ssh public keys are\ndynamically injected into the vm at runtime via the qemu guest agent.\nThis feature requires the qemu guest agent to be running within the guest.\n+optional",
+	}
+}
+
+func (SSHPublicKeyAccessCredential) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "SSHPublicKeyAccessCredential represents a source and propagation method for\ninjecting ssh public keys into a vm guest",
+		"source": "Source represents where the public keys are pulled from",
+		"propagationMethod": "PropagationMethod represents how the public key is injected into the vm guest.",
+	}
+}
+
+func (UserPasswordAccessCredentialSource) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "UserPasswordAccessCredentialSource represents where to retrieve the user password\ncredentials\nOnly one of its members may be specified.",
+		"secret": "Secret means that the access credential is pulled from a kubernetes secret\n+optional",
+	}
+}
+
+func (UserPasswordAccessCredentialPropagationMethod) SwaggerDoc() map[string]string {
+	return map[string]string{
+		"": "UserPasswordAccessCredentialPropagationMethod represents the method used to\ninject user passwords into the vm guest.\nOnly one of its members may be specified.",
"QemuGuestAgentAccessCredentailPropagation means passwords are\ndynamically injected into the vm at runtime via the qemu guest agent.\nThis feature requires the qemu guest agent to be running within the guest.\n+optional", + } +} + +func (UserPasswordAccessCredential) SwaggerDoc() map[string]string { + return map[string]string{ + "": "UserPasswordAccessCredential represents a source and propagation method for\ninjecting user passwords into a vm guest\nOnly one of its members may be specified.", + "source": "Source represents where the user passwords are pulled from", + "propagationMethod": "propagationMethod represents how the user passwords are injected into the vm guest.", + } +} + +func (AccessCredential) SwaggerDoc() map[string]string { + return map[string]string{ + "": "AccessCredential represents a credential source that can be used to\nauthorize remote access to the vm guest\nOnly one of its members may be specified.", + "sshPublicKey": "SSHPublicKey represents the source and method of applying a ssh public\nkey into a guest virtual machine.\n+optional", + "userPassword": "UserPassword represents the source and method for applying a guest user's\npassword\n+optional", + } +} + +func (Network) SwaggerDoc() map[string]string { + return map[string]string{ + "": "Network represents a network type and a resource that should be connected to the vm.", + "name": "Network name.\nMust be a DNS_LABEL and unique within the vm.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + } +} + +func (NetworkSource) SwaggerDoc() map[string]string { + return map[string]string{ + "": "Represents the source resource that will be connected to the vm.\nOnly one of its members may be specified.", + } +} + +func (PodNetwork) SwaggerDoc() map[string]string { + return map[string]string{ + "": "Represents the stock pod network interface.", + "vmNetworkCIDR": "CIDR for vm network.\nDefault 10.0.2.0/24 if not specified.", + "vmIPv6NetworkCIDR": "IPv6 CIDR for the vm network.\nDefaults to fd10:0:2::/120 if not specified.", + } +} + +func (Rng) SwaggerDoc() map[string]string { + return map[string]string{ + "": "Rng represents the random device passed from host", + } +} + +func (MultusNetwork) SwaggerDoc() map[string]string { + return map[string]string{ + "": "Represents the multus cni network.", + "networkName": "References to a NetworkAttachmentDefinition CRD object. Format:\n, /. If namespace is not\nspecified, VMI namespace is assumed.", + "default": "Select the default network and add it to the\nmultus-cni.io/default-network annotation.", + } +} diff --git a/vendor/kubevirt.io/api/core/v1/types.go b/vendor/kubevirt.io/api/core/v1/types.go new file mode 100644 index 000000000..1ac4fefaf --- /dev/null +++ b/vendor/kubevirt.io/api/core/v1/types.go @@ -0,0 +1,2527 @@ +/* + * This file is part of the KubeVirt project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright 2017 Red Hat, Inc. 
+ *
+ */
+
+package v1
+
+/*
+ ATTENTION: Rerun code generators when comments on structs or fields are modified.
+*/
+
+import (
+	"encoding/json"
+	"fmt"
+
+	k8sv1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+
+	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
+)
+
+const DefaultGracePeriodSeconds int64 = 30
+
+// VirtualMachineInstance is *the* VirtualMachineInstance Definition. It represents a virtual machine in the runtime environment of kubernetes.
+//
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +genclient
+type VirtualMachineInstance struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// VirtualMachineInstance Spec contains the VirtualMachineInstance specification.
+	Spec VirtualMachineInstanceSpec `json:"spec" valid:"required"`
+	// Status is the high level overview of how the VirtualMachineInstance is doing. It contains information available to controllers and users.
+	Status VirtualMachineInstanceStatus `json:"status,omitempty"`
+}
+
+func (v *VirtualMachineInstance) MarshalBinary() (data []byte, err error) {
+	return json.Marshal(*v)
+}
+
+func (v *VirtualMachineInstance) UnmarshalBinary(data []byte) error {
+	return json.Unmarshal(data, v)
+}
+
+// VirtualMachineInstanceList is a list of VirtualMachines
+//
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type VirtualMachineInstanceList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []VirtualMachineInstance `json:"items"`
+}
+
+type EvictionStrategy string
+
+type StartStrategy string
+
+const (
+	StartStrategyPaused StartStrategy = "Paused"
+)
+
+// VirtualMachineInstanceSpec is a description of a VirtualMachineInstance.
+type VirtualMachineInstanceSpec struct {
+
+	// If specified, indicates the pod's priority.
+	// If not specified, the pod priority will be default or zero if there is no
+	// default.
+	// +optional
+	PriorityClassName string `json:"priorityClassName,omitempty"`
+
+	// Specification of the desired behavior of the VirtualMachineInstance on the host.
+	Domain DomainSpec `json:"domain"`
+	// NodeSelector is a selector which must be true for the vmi to fit on a node.
+	// Selector which must match a node's labels for the vmi to be scheduled on that node.
+	// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+	// If affinity is specified, obey all the affinity rules
+	Affinity *k8sv1.Affinity `json:"affinity,omitempty"`
+	// If specified, the VMI will be dispatched by specified scheduler.
+	// If not specified, the VMI will be dispatched by default scheduler.
+	// +optional
+	SchedulerName string `json:"schedulerName,omitempty"`
+	// If toleration is specified, obey all the toleration rules.
+	Tolerations []k8sv1.Toleration `json:"tolerations,omitempty"`
+	// TopologySpreadConstraints describes how a group of VMIs will be spread across a given topology
+	// domains. K8s scheduler will schedule VMI pods in a way which abides by the constraints.
+	// +optional
+	// +patchMergeKey=topologyKey
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=topologyKey
+	// +listMapKey=whenUnsatisfiable
+	TopologySpreadConstraints []k8sv1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty" patchStrategy:"merge" patchMergeKey:"topologyKey"`
+	// EvictionStrategy can be set to "LiveMigrate" if the VirtualMachineInstance should be
+	// migrated instead of shut-off in case of a node drain.
+	//
+	// +optional
+	EvictionStrategy *EvictionStrategy `json:"evictionStrategy,omitempty"`
+	// StartStrategy can be set to "Paused" if Virtual Machine should be started in paused state.
+	//
+	// +optional
+	StartStrategy *StartStrategy `json:"startStrategy,omitempty"`
+	// Grace period observed after signalling a VirtualMachineInstance to stop after which the VirtualMachineInstance is force terminated.
+	TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
+	// List of volumes that can be mounted by disks belonging to the vmi.
+	Volumes []Volume `json:"volumes,omitempty"`
+	// Periodic probe of VirtualMachineInstance liveness.
+	// VirtualMachineInstances will be stopped if the probe fails.
+	// Cannot be updated.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+	// +optional
+	LivenessProbe *Probe `json:"livenessProbe,omitempty"`
+	// Periodic probe of VirtualMachineInstance service readiness.
+	// VirtualMachineInstances will be removed from service endpoints if the probe fails.
+	// Cannot be updated.
+	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+	// +optional
+	ReadinessProbe *Probe `json:"readinessProbe,omitempty"`
+	// Specifies the hostname of the vmi
+	// If not specified, the hostname will be set to the name of the vmi, if dhcp or cloud-init is configured properly.
+	// +optional
+	Hostname string `json:"hostname,omitempty"`
+	// If specified, the fully qualified vmi hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+	// If not specified, the vmi will not have a domainname at all. The DNS entry will resolve to the vmi,
+	// no matter if the vmi itself can pick up a hostname.
+	// +optional
+	Subdomain string `json:"subdomain,omitempty"`
+	// List of networks that can be attached to a vm's virtual interface.
+	Networks []Network `json:"networks,omitempty"`
+	// Set DNS policy for the pod.
+	// Defaults to "ClusterFirst".
+	// Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
+	// DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
+	// To have DNS options set along with hostNetwork, you have to specify DNS policy
+	// explicitly to 'ClusterFirstWithHostNet'.
+	// +optional
+	DNSPolicy k8sv1.DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"`
+	// Specifies the DNS parameters of a pod.
+	// Parameters specified here will be merged to the generated DNS
+	// configuration based on DNSPolicy.
+	// +optional
+	DNSConfig *k8sv1.PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"`
+	// Specifies a set of public keys to inject into the vm guest
+	// +listType=atomic
+	// +optional
+	AccessCredentials []AccessCredential `json:"accessCredentials,omitempty"`
+}
+
+func (vmiSpec *VirtualMachineInstanceSpec) UnmarshalJSON(data []byte) error {
+	type VMISpecAlias VirtualMachineInstanceSpec
+	var vmiSpecAlias VMISpecAlias
+
+	if err := json.Unmarshal(data, &vmiSpecAlias); err != nil {
+		return err
+	}
+
+	if vmiSpecAlias.DNSConfig != nil {
+		for i, ns := range vmiSpecAlias.DNSConfig.Nameservers {
+			if sanitizedIP, err := sanitizeIP(ns); err == nil {
+				vmiSpecAlias.DNSConfig.Nameservers[i] = sanitizedIP
+			}
+		}
+	}
+
+	*vmiSpec = VirtualMachineInstanceSpec(vmiSpecAlias)
+	return nil
+}
+
+// VirtualMachineInstancePhaseTransitionTimestamp gives a timestamp in relation to when a phase is set on a vmi
+type VirtualMachineInstancePhaseTransitionTimestamp struct {
+	// Phase is the status of the VirtualMachineInstance in the Kubernetes world. It is not the VirtualMachineInstance status, but partially correlates to it.
+	Phase VirtualMachineInstancePhase `json:"phase,omitempty"`
+	// PhaseTransitionTimestamp is the timestamp of when the phase change occurred
+	PhaseTransitionTimestamp metav1.Time `json:"phaseTransitionTimestamp,omitempty"`
+}
+
+type TopologyHints struct {
+	TSCFrequency *int64 `json:"tscFrequency,omitempty"`
+}
+
+// VirtualMachineInstanceStatus represents information about the status of a VirtualMachineInstance. Status may trail the actual
+// state of a system.
+type VirtualMachineInstanceStatus struct {
+	// NodeName is the name of the node where the VirtualMachineInstance is currently running.
+	NodeName string `json:"nodeName,omitempty"`
+	// A brief CamelCase message indicating details about why the VMI is in this state. e.g. 'NodeUnresponsive'
+	// +optional
+	Reason string `json:"reason,omitempty"`
+	// Conditions are specific points in VirtualMachineInstance's pod runtime.
+	Conditions []VirtualMachineInstanceCondition `json:"conditions,omitempty"`
+	// Phase is the status of the VirtualMachineInstance in the Kubernetes world. It is not the VirtualMachineInstance status, but partially correlates to it.
+	Phase VirtualMachineInstancePhase `json:"phase,omitempty"`
+	// PhaseTransitionTimestamps contains a timestamp for each phase change that occurred
+	// +listType=atomic
+	// +optional
+	PhaseTransitionTimestamps []VirtualMachineInstancePhaseTransitionTimestamp `json:"phaseTransitionTimestamps,omitempty"`
+	// Interfaces represent the details of available network interfaces.
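+	// A consumer might read the primary address via (sketch):
+	//
+	//	if len(vmi.Status.Interfaces) > 0 {
+	//		ip := vmi.Status.Interfaces[0].IP
+	//	}
+	//
+	// where IP is always the first item of IPs (see VirtualMachineInstanceNetworkInterface below).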
+	Interfaces []VirtualMachineInstanceNetworkInterface `json:"interfaces,omitempty"`
+	// Guest OS Information
+	GuestOSInfo VirtualMachineInstanceGuestOSInfo `json:"guestOSInfo,omitempty"`
+	// Represents the status of a live migration
+	MigrationState *VirtualMachineInstanceMigrationState `json:"migrationState,omitempty"`
+	// Represents the method by which the vmi can be migrated: live migration or block migration
+	MigrationMethod VirtualMachineInstanceMigrationMethod `json:"migrationMethod,omitempty"`
+	// This represents the migration transport
+	MigrationTransport VirtualMachineInstanceMigrationTransport `json:"migrationTransport,omitempty"`
+	// The Quality of Service (QOS) classification assigned to the virtual machine instance based on resource requirements
+	// See PodQOSClass type for available QOS classes
+	// More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md
+	// +optional
+	QOSClass *k8sv1.PodQOSClass `json:"qosClass,omitempty"`
+
+	// LauncherContainerImageVersion indicates what container image is currently active for the vmi.
+	LauncherContainerImageVersion string `json:"launcherContainerImageVersion,omitempty"`
+
+	// EvacuationNodeName is used to track the eviction process of a VMI. It stores the name of the node that we want
+	// to evacuate. It is meant to be used by KubeVirt core components only and can't be set or modified by users.
+	// +optional
+	EvacuationNodeName string `json:"evacuationNodeName,omitempty"`
+
+	// ActivePods is a mapping of pod UID to node name.
+	// It is possible for multiple pods to be running for a single VMI during migration.
+	ActivePods map[types.UID]string `json:"activePods,omitempty"`
+
+	// VolumeStatus contains the statuses of all the volumes
+	// +optional
+	// +listType=atomic
+	VolumeStatus []VolumeStatus `json:"volumeStatus,omitempty"`
+
+	// FSFreezeStatus is the state of the guest filesystem; it can be either "frozen" or "thawed"
+	// +optional
+	FSFreezeStatus string `json:"fsFreezeStatus,omitempty"`
+
+	// +optional
+	TopologyHints *TopologyHints `json:"topologyHints,omitempty"`
+
+	// VirtualMachineRevisionName is used to get the vm revision of the vmi when doing
+	// an online vm snapshot
+	// +optional
+	VirtualMachineRevisionName string `json:"virtualMachineRevisionName,omitempty"`
+
+	// RuntimeUser is used to determine what user will be used in the launcher
+	// +optional
+	RuntimeUser uint64 `json:"runtimeUser"`
+
+	// VSOCKCID is used to track the allocated VSOCK CID in the VM.
+	// +optional
+	VSOCKCID *uint32 `json:"VSOCKCID,omitempty"`
+
+	// SelinuxContext is the actual SELinux context of the virt-launcher pod
+	// +optional
+	SelinuxContext string `json:"selinuxContext,omitempty"`
+}
+
+// PersistentVolumeClaimInfo contains the relevant information virt-handler needs cached about a PVC
+type PersistentVolumeClaimInfo struct {
+	// AccessModes contains the desired access modes the volume should have.
+	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+	// +listType=atomic
+	// +optional
+	AccessModes []k8sv1.PersistentVolumeAccessMode `json:"accessModes,omitempty"`
+
+	// VolumeMode defines what type of volume is required by the claim.
+	// Value of Filesystem is implied when not included in claim spec.
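+	// For example (sketch; info is a *PersistentVolumeClaimInfo), a caller
+	// applying that default might write:
+	//
+	//	mode := k8sv1.PersistentVolumeFilesystem
+	//	if info.VolumeMode != nil {
+	//		mode = *info.VolumeMode
+	//	}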
+	// +optional
+	VolumeMode *k8sv1.PersistentVolumeMode `json:"volumeMode,omitempty"`
+
+	// Capacity represents the capacity set on the corresponding PVC status
+	// +optional
+	Capacity k8sv1.ResourceList `json:"capacity,omitempty"`
+
+	// Requests represents the resources requested by the corresponding PVC spec
+	// +optional
+	Requests k8sv1.ResourceList `json:"requests,omitempty"`
+
+	// Preallocated indicates if the PVC's storage is preallocated or not
+	// +optional
+	Preallocated bool `json:"preallocated,omitempty"`
+
+	// Percentage of the filesystem's size to be reserved when resizing the PVC
+	// +optional
+	FilesystemOverhead *cdiv1.Percent `json:"filesystemOverhead,omitempty"`
+}
+
+// VolumeStatus represents information about the status of volumes attached to the VirtualMachineInstance.
+type VolumeStatus struct {
+	// Name is the name of the volume
+	Name string `json:"name"`
+	// Target is the target name used when adding the volume to the VM, e.g. vda
+	Target string `json:"target"`
+	// Phase is the current phase of the volume (see VolumePhase below)
+	Phase VolumePhase `json:"phase,omitempty"`
+	// Reason is a brief description of why we are in the current hotplug volume phase
+	Reason string `json:"reason,omitempty"`
+	// Message is a detailed message about the current hotplug volume phase
+	Message string `json:"message,omitempty"`
+	// PersistentVolumeClaimInfo is information about the PVC that handler requires during start flow
+	PersistentVolumeClaimInfo *PersistentVolumeClaimInfo `json:"persistentVolumeClaimInfo,omitempty"`
+	// If the volume is hotplug, this will contain the hotplug status.
+	HotplugVolume *HotplugVolumeStatus `json:"hotplugVolume,omitempty"`
+	// Represents the size of the volume
+	Size int64 `json:"size,omitempty"`
+	// If the volume is a memory dump volume, this will contain the memory dump info.
+	MemoryDumpVolume *DomainMemoryDumpInfo `json:"memoryDumpVolume,omitempty"`
+}
+
+// DomainMemoryDumpInfo represents the memory dump information
+type DomainMemoryDumpInfo struct {
+	// StartTimestamp is the time when the memory dump started
+	StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"`
+	// EndTimestamp is the time when the memory dump completed
+	EndTimestamp *metav1.Time `json:"endTimestamp,omitempty"`
+	// ClaimName is the name of the pvc the memory was dumped to
+	ClaimName string `json:"claimName,omitempty"`
+	// TargetFileName is the name of the memory dump output
+	TargetFileName string `json:"targetFileName,omitempty"`
+}
+
+// HotplugVolumeStatus represents the hotplug status of the volume
+type HotplugVolumeStatus struct {
+	// AttachPodName is the name of the pod used to attach the volume to the node.
+	AttachPodName string `json:"attachPodName,omitempty"`
+	// AttachPodUID is the UID of the pod used to attach the volume to the node.
+	AttachPodUID types.UID `json:"attachPodUID,omitempty"`
+}
+
+// VolumePhase indicates the current phase of the hotplug process.
+type VolumePhase string
+
+const (
+	// VolumePending means the Volume is pending and cannot be attached to the node yet.
+	VolumePending VolumePhase = "Pending"
+	// VolumeBound means the Volume is bound and can be attached to the node.
+	VolumeBound VolumePhase = "Bound"
+	// HotplugVolumeAttachedToNode means the volume has been attached to the node.
+	HotplugVolumeAttachedToNode VolumePhase = "AttachedToNode"
+	// HotplugVolumeMounted means the volume has been attached to the node and is mounted to the virt-launcher pod.
+	HotplugVolumeMounted VolumePhase = "MountedToPod"
+	// VolumeReady means the volume is ready to be used by the VirtualMachineInstance.
+	VolumeReady VolumePhase = "Ready"
+	// HotplugVolumeDetaching means the volume is being detached from the node, and the attachment pod is being removed.
+	HotplugVolumeDetaching VolumePhase = "Detaching"
+	// HotplugVolumeUnMounted means the volume has been unmounted from the virt-launcher pod.
+	HotplugVolumeUnMounted VolumePhase = "UnMountedFromPod"
+	// MemoryDumpVolumeCompleted means that the requested memory dump was completed and the dump is ready in the volume
+	MemoryDumpVolumeCompleted VolumePhase = "MemoryDumpCompleted"
+	// MemoryDumpVolumeInProgress means that the volume for the memory dump was attached, and now the command is being triggered
+	MemoryDumpVolumeInProgress VolumePhase = "MemoryDumpInProgress"
+	// MemoryDumpVolumeFailed means that the memory dump failed
+	MemoryDumpVolumeFailed VolumePhase = "MemoryDumpFailed"
+)
+
+func (v *VirtualMachineInstance) IsScheduling() bool {
+	return v.Status.Phase == Scheduling
+}
+
+func (v *VirtualMachineInstance) IsScheduled() bool {
+	return v.Status.Phase == Scheduled
+}
+
+func (v *VirtualMachineInstance) IsRunning() bool {
+	return v.Status.Phase == Running
+}
+
+func (v *VirtualMachineInstance) IsMarkedForEviction() bool {
+	return v.Status.EvacuationNodeName != ""
+}
+
+func (v *VirtualMachineInstance) IsMigratable() bool {
+	for _, cond := range v.Status.Conditions {
+		if cond.Type == VirtualMachineInstanceIsMigratable && cond.Status == k8sv1.ConditionTrue {
+			return true
+		}
+	}
+	return false
+}
+
+func (v *VirtualMachineInstance) IsFinal() bool {
+	return v.Status.Phase == Failed || v.Status.Phase == Succeeded
+}
+
+func (v *VirtualMachineInstance) IsMarkedForDeletion() bool {
+	return v.ObjectMeta.DeletionTimestamp != nil
+}
+
+func (v *VirtualMachineInstance) IsUnknown() bool {
+	return v.Status.Phase == Unknown
+}
+
+func (v *VirtualMachineInstance) IsUnprocessed() bool {
+	return v.Status.Phase == Pending || v.Status.Phase == VmPhaseUnset
+}
+
+// Checks if CPU pinning has been requested
+func (v *VirtualMachineInstance) IsCPUDedicated() bool {
+	return v.Spec.Domain.CPU != nil && v.Spec.Domain.CPU.DedicatedCPUPlacement
+}
+
+func (v *VirtualMachineInstance) IsBootloaderEFI() bool {
+	return v.Spec.Domain.Firmware != nil && v.Spec.Domain.Firmware.Bootloader != nil &&
+		v.Spec.Domain.Firmware.Bootloader.EFI != nil
+}
+
+// WantsToHaveQOSGuaranteed checks if CPU and memory limits and requests are identical on the VMI.
+// This is the indicator that people want a VMI with a QOS class of Guaranteed
+func (v *VirtualMachineInstance) WantsToHaveQOSGuaranteed() bool {
+	resources := v.Spec.Domain.Resources
+	return !resources.Requests.Memory().IsZero() && resources.Requests.Memory().Cmp(*resources.Limits.Memory()) == 0 &&
+		!resources.Requests.Cpu().IsZero() && resources.Requests.Cpu().Cmp(*resources.Limits.Cpu()) == 0
+}
+
+// ShouldStartPaused returns true if the VMI should be started in a paused state
+func (v *VirtualMachineInstance) ShouldStartPaused() bool {
+	return v.Spec.StartStrategy != nil && *v.Spec.StartStrategy == StartStrategyPaused
+}
+
+func (v *VirtualMachineInstance) IsRealtimeEnabled() bool {
+	return v.Spec.Domain.CPU != nil && v.Spec.Domain.CPU.Realtime != nil
+}
+
+type VirtualMachineInstanceConditionType string
+
+// These are valid conditions of VMIs.
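+// Controllers typically scan Status.Conditions for one of these types, as
+// IsMigratable does above. A minimal sketch for an arbitrary type (the
+// hasCondition helper is hypothetical, not part of this package):
+//
+//	func hasCondition(vmi *VirtualMachineInstance, t VirtualMachineInstanceConditionType) bool {
+//		for _, c := range vmi.Status.Conditions {
+//			if c.Type == t && c.Status == k8sv1.ConditionTrue {
+//				return true
+//			}
+//		}
+//		return false
+//	}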
+const (
+	// Provisioning means a VMI depends on DataVolumes which are in Pending/WaitForFirstConsumer status,
+	// and some actions are taken to provision the PVCs for the DataVolumes
+	VirtualMachineInstanceProvisioning VirtualMachineInstanceConditionType = "Provisioning"
+
+	// Ready means the VMI is able to service requests and should be added to the
+	// load balancing pools of all matching services.
+	VirtualMachineInstanceReady VirtualMachineInstanceConditionType = "Ready"
+
+	// If any error happens while trying to synchronize the VirtualMachineInstance with the Domain,
+	// this is reported as false.
+	VirtualMachineInstanceSynchronized VirtualMachineInstanceConditionType = "Synchronized"
+
+	// If the VMI was paused by the user, this is reported as true.
+	VirtualMachineInstancePaused VirtualMachineInstanceConditionType = "Paused"
+
+	// Reflects whether the QEMU guest agent is connected through the channel
+	VirtualMachineInstanceAgentConnected VirtualMachineInstanceConditionType = "AgentConnected"
+
+	// Reflects whether the QEMU guest agent updated access credentials successfully
+	VirtualMachineInstanceAccessCredentialsSynchronized VirtualMachineInstanceConditionType = "AccessCredentialsSynchronized"
+
+	// Reflects whether the version of the connected QEMU guest agent is unsupported
+	VirtualMachineInstanceUnsupportedAgent VirtualMachineInstanceConditionType = "AgentVersionNotSupported"
+
+	// Indicates whether the VMI is live migratable
+	VirtualMachineInstanceIsMigratable VirtualMachineInstanceConditionType = "LiveMigratable"
+	// Reason means that the VMI is not live migratable because of its disks
+	VirtualMachineInstanceReasonDisksNotMigratable = "DisksNotLiveMigratable"
+	// Reason means that the VMI is not live migratable because of its network interfaces
+	VirtualMachineInstanceReasonInterfaceNotMigratable = "InterfaceNotLiveMigratable"
+	// Reason means that the VMI is not live migratable because it uses hotplug
+	VirtualMachineInstanceReasonHotplugNotMigratable = "HotplugNotLiveMigratable"
+	// Reason means that the VMI is not live migratable because of its CPU mode
+	VirtualMachineInstanceReasonCPUModeNotMigratable = "CPUModeLiveMigratable"
+	// Reason means that the VMI is not live migratable because it uses virtiofs
+	VirtualMachineInstanceReasonVirtIOFSNotMigratable = "VirtIOFSNotLiveMigratable"
+	// Reason means that the VMI is not live migratable because it uses PCI host devices
+	VirtualMachineInstanceReasonHostDeviceNotMigratable = "HostDeviceNotLiveMigratable"
+	// Reason means that the VMI is not live migratable because it uses Secure Encrypted Virtualization (SEV)
+	VirtualMachineInstanceReasonSEVNotMigratable = "SEVNotLiveMigratable"
+	// Reason means that the VMI is not live migratable because it uses HyperV Reenlightenment while TSC Frequency is not available
+	VirtualMachineInstanceReasonNoTSCFrequencyMigratable = "NoTSCFrequencyNotLiveMigratable"
+	// Reason means that the VMI is not live migratable because it uses dedicated CPU and emulator thread isolation
+	VirtualMachineInstanceReasonDedicatedCPU = "DedicatedCPUNotLiveMigratable"
+)
+
+const (
+	// PodTerminatingReason indicates on the Ready condition on the VMI if the underlying pod is terminating
+	PodTerminatingReason = "PodTerminating"
+
+	// PodNotExistsReason indicates on the Ready condition on the VMI if the underlying pod does not exist
+	PodNotExistsReason = "PodNotExists"
+
+	// PodConditionMissingReason indicates on the Ready condition on the VMI if the underlying pod does not report a
+	// Ready condition
+	PodConditionMissingReason = "PodConditionMissing"
+
+	// GuestNotRunningReason indicates on the Ready condition on the VMI if the underlying guest VM is not running
+	GuestNotRunningReason = "GuestNotRunning"
+)
+
+type VirtualMachineInstanceMigrationConditionType string
+
+// These are valid conditions of VMI migrations.
+const (
+	// VirtualMachineInstanceMigrationAbortRequested indicates that a live migration abort has been requested
+	VirtualMachineInstanceMigrationAbortRequested VirtualMachineInstanceMigrationConditionType = "migrationAbortRequested"
+)
+
+type VirtualMachineInstanceCondition struct {
+	Type   VirtualMachineInstanceConditionType `json:"type"`
+	Status k8sv1.ConditionStatus               `json:"status"`
+	// +nullable
+	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
+	// +nullable
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+	Reason             string      `json:"reason,omitempty"`
+	Message            string      `json:"message,omitempty"`
+}
+
+type VirtualMachineInstanceMigrationCondition struct {
+	Type   VirtualMachineInstanceMigrationConditionType `json:"type"`
+	Status k8sv1.ConditionStatus                        `json:"status"`
+	// +nullable
+	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
+	// +nullable
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+	Reason             string      `json:"reason,omitempty"`
+	Message            string      `json:"message,omitempty"`
+}
+
+// The migration phase indicates that the job has completed
+func (m *VirtualMachineInstanceMigration) IsFinal() bool {
+	return m.Status.Phase == MigrationFailed || m.Status.Phase == MigrationSucceeded
+}
+
+func (m *VirtualMachineInstanceMigration) IsRunning() bool {
+	switch m.Status.Phase {
+	case MigrationFailed, MigrationPending, MigrationPhaseUnset, MigrationSucceeded:
+		return false
+	}
+	return true
+}
+
+// The migration phase indicates that the target pod should have already been created
+func (m *VirtualMachineInstanceMigration) TargetIsCreated() bool {
+	return m.Status.Phase != MigrationPhaseUnset &&
+		m.Status.Phase != MigrationPending
+}
+
+// The migration phase indicates that the job has been handed off to the VMI controllers to complete.
+func (m *VirtualMachineInstanceMigration) TargetIsHandedOff() bool {
+	return m.Status.Phase != MigrationPhaseUnset &&
+		m.Status.Phase != MigrationPending &&
+		m.Status.Phase != MigrationScheduling &&
+		m.Status.Phase != MigrationScheduled
+}
+
+type VirtualMachineInstanceNetworkInterface struct {
+	// IP address of a Virtual Machine interface. It is always the first item of
+	// IPs
+	IP string `json:"ipAddress,omitempty"`
+	// Hardware address of a Virtual Machine interface
+	MAC string `json:"mac,omitempty"`
+	// Name of the interface, corresponds to the name of the network assigned to the interface
+	Name string `json:"name,omitempty"`
+	// List of all IP addresses of a Virtual Machine interface
+	IPs []string `json:"ipAddresses,omitempty"`
+	// The interface name inside the Virtual Machine
+	InterfaceName string `json:"interfaceName,omitempty"`
+	// Specifies the origin of the interface data collected;
+	// values: domain, guest-agent, or both
+	InfoSource string `json:"infoSource,omitempty"`
+	// Specifies how many queues are allocated by MultiQueue
+	QueueCount int32 `json:"queueCount,omitempty"`
+}
+
+type VirtualMachineInstanceGuestOSInfo struct {
+	// Name of the Guest OS
+	Name string `json:"name,omitempty"`
+	// Guest OS Kernel Release
+	KernelRelease string `json:"kernelRelease,omitempty"`
+	// Guest OS Version
+	Version string `json:"version,omitempty"`
+	// Guest OS Pretty Name
+	PrettyName string `json:"prettyName,omitempty"`
+	// Version ID of the Guest OS
+	VersionID string `json:"versionId,omitempty"`
+	// Kernel version of the Guest OS
+	KernelVersion string `json:"kernelVersion,omitempty"`
+	// Machine type of the Guest OS
+	Machine string `json:"machine,omitempty"`
+	// Guest OS ID
+	ID string `json:"id,omitempty"`
+}
+
+// MigrationConfigSource indicates the source of the migration configuration.
+//
+// +k8s:openapi-gen=true
+type MigrationConfigSource string
+
+// +k8s:openapi-gen=true
+type VirtualMachineInstanceMigrationState struct {
+	// The time the migration action began
+	// +nullable
+	StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"`
+	// The time the migration action ended
+	// +nullable
+	EndTimestamp *metav1.Time `json:"endTimestamp,omitempty"`
+
+	// The Target Node has seen the Domain Start Event
+	TargetNodeDomainDetected bool `json:"targetNodeDomainDetected,omitempty"`
+	// The address of the target node to use for the migration
+	TargetNodeAddress string `json:"targetNodeAddress,omitempty"`
+	// The list of ports opened for live migration on the destination node
+	TargetDirectMigrationNodePorts map[string]int `json:"targetDirectMigrationNodePorts,omitempty"`
+	// The target node that the VMI is moving to
+	TargetNode string `json:"targetNode,omitempty"`
+	// The target pod that the VMI is moving to
+	TargetPod string `json:"targetPod,omitempty"`
+	// The UID of the target attachment pod for hotplug volumes
+	TargetAttachmentPodUID types.UID `json:"targetAttachmentPodUID,omitempty"`
+	// The source node that the VMI originated on
+	SourceNode string `json:"sourceNode,omitempty"`
+	// Indicates the migration completed
+	Completed bool `json:"completed,omitempty"`
+	// Indicates that the migration failed
+	Failed bool `json:"failed,omitempty"`
+	// Indicates that the migration has been requested to abort
+	AbortRequested bool `json:"abortRequested,omitempty"`
+	// Indicates the final status of the live migration abortion
+	AbortStatus MigrationAbortStatus `json:"abortStatus,omitempty"`
+	// The VirtualMachineInstanceMigration object associated with this migration
+	MigrationUID types.UID `json:"migrationUid,omitempty"`
+	// Lets us know if the vmi is currently running a pre- or post-copy migration
+	Mode MigrationMode `json:"mode,omitempty"`
+	// Name of the migration policy.
+	// If the string is empty, no policy is matched
+	MigrationPolicyName *string `json:"migrationPolicyName,omitempty"`
+	// Migration configurations to apply
+	MigrationConfiguration *MigrationConfiguration `json:"migrationConfiguration,omitempty"`
+	// If the VMI requires dedicated CPUs, this field will
+	// hold the dedicated CPU set on the target node
+	// +listType=atomic
+	TargetCPUSet []int `json:"targetCPUSet,omitempty"`
+	// If the VMI requires dedicated CPUs, this field will
+	// hold the NUMA topology on the target node
+	TargetNodeTopology string `json:"targetNodeTopology,omitempty"`
+}
+
+type MigrationAbortStatus string
+
+const (
+	// MigrationAbortSucceeded means that the VirtualMachineInstance live migration has been aborted
+	MigrationAbortSucceeded MigrationAbortStatus = "Succeeded"
+	// MigrationAbortFailed means that the vmi live migration has failed to be aborted
+	MigrationAbortFailed MigrationAbortStatus = "Failed"
+	// MigrationAbortInProgress means that the vmi live migration is aborting
+	MigrationAbortInProgress MigrationAbortStatus = "Aborting"
+)
+
+type MigrationMode string
+
+const (
+	// MigrationPreCopy means the currently running VMI migration is in pre-copy mode
+	MigrationPreCopy MigrationMode = "PreCopy"
+	// MigrationPostCopy means the currently running VMI migration is in post-copy mode
+	MigrationPostCopy MigrationMode = "PostCopy"
+)
+
+type VirtualMachineInstanceMigrationTransport string
+
+const (
+	// MigrationTransportUnix means that the VMI will be migrated using the unix URI
+	MigrationTransportUnix VirtualMachineInstanceMigrationTransport = "Unix"
+)
+
+type VirtualMachineInstanceMigrationMethod string
+
+const (
+	// BlockMigration means that all VirtualMachineInstance disks should be copied over to the destination host
+	BlockMigration VirtualMachineInstanceMigrationMethod = "BlockMigration"
+	// LiveMigration means that VirtualMachineInstance disks will not be copied over to the destination host
+	LiveMigration VirtualMachineInstanceMigrationMethod = "LiveMigration"
+)
+
+// VirtualMachineInstancePhase is a label for the condition of a VirtualMachineInstance at the current time.
+type VirtualMachineInstancePhase string
+
+// These are the valid phases of a VirtualMachineInstance.
+const (
+	// VmPhaseUnset is used when a VirtualMachineInstance object is first initialized and no phase, not even Pending, is present.
+	VmPhaseUnset VirtualMachineInstancePhase = ""
+	// Pending means the VirtualMachineInstance has been accepted by the system.
+	Pending VirtualMachineInstancePhase = "Pending"
+	// Scheduling means a target Pod exists but is not yet scheduled and in a running state.
+	Scheduling VirtualMachineInstancePhase = "Scheduling"
+	// Scheduled means a target pod was scheduled and the system saw that Pod in a running state.
+	// Here is where the responsibility of virt-controller ends and virt-handler takes over.
+	Scheduled VirtualMachineInstancePhase = "Scheduled"
+	// Running means the pod has been bound to a node and the VirtualMachineInstance is started.
+	Running VirtualMachineInstancePhase = "Running"
+	// Succeeded means that the VirtualMachineInstance stopped voluntarily, e.g. reacted to SIGTERM or shutdown was invoked from
+	// inside the VirtualMachineInstance.
+	Succeeded VirtualMachineInstancePhase = "Succeeded"
+	// Failed means that the vmi crashed, disappeared unexpectedly or got deleted from the cluster before it was ever started.
+	Failed VirtualMachineInstancePhase = "Failed"
+	// Unknown means that for some reason the state of the VirtualMachineInstance could not be obtained, typically due
+	// to an error in communicating with the host of the VirtualMachineInstance.
+	Unknown VirtualMachineInstancePhase = "Unknown"
+)
+
+const (
+	// AppLabel and AppName labels mark resources that belong to KubeVirt. An optional value
+	// may indicate which specific KubeVirt component a resource belongs to.
+	AppLabel string = "kubevirt.io"
+	AppName  string = "name"
+	// This annotation is used to match virtual machine instances represented as
+	// libvirt XML domains with their pods. Among other things, the annotation is
+	// used to detect virtual machines with dead pods. Used on Pod.
+	DomainAnnotation string = "kubevirt.io/domain"
+	// Represents the name of the migration job this target pod is associated with
+	MigrationJobNameAnnotation                    string = "kubevirt.io/migrationJobName"
+	ControllerAPILatestVersionObservedAnnotation  string = "kubevirt.io/latest-observed-api-version"
+	ControllerAPIStorageVersionObservedAnnotation string = "kubevirt.io/storage-observed-api-version"
+	// Used by functional tests to force a VMI to fail the migration internally within launcher
+	FuncTestForceLauncherMigrationFailureAnnotation string = "kubevirt.io/func-test-force-launcher-migration-failure"
+	// Used by functional tests to prevent virt-launcher from finishing the target pod preparation.
+	FuncTestBlockLauncherPrepareMigrationTargetAnnotation string = "kubevirt.io/func-test-block-migration-target-preparation"
+
+	// Used by functional tests to set a custom image on the migration target pod
+	FuncTestMigrationTargetImageOverrideAnnotation string = "kubevirt.io/func-test-migration-target-image-override"
+
+	// Used by functional tests to simulate virt-launcher crash looping
+	FuncTestLauncherFailFastAnnotation string = "kubevirt.io/func-test-virt-launcher-fail-fast"
+
+	// Used by functional tests to ignore backoff applied to migrations
+	FuncTestForceIgnoreMigrationBackoffAnnotation string = "kubevirt.io/func-test-ignore-migration-backoff"
+
+	// This label is used to match virtual machine instance IDs with pods.
+	// Similar to kubevirt.io/domain. Used on Pod.
+	// Internal use only.
+	CreatedByLabel string = "kubevirt.io/created-by"
+	// This label is used to indicate that this pod is the target of a migration job.
+	MigrationJobLabel string = "kubevirt.io/migrationJobUID"
+	// This label indicates the migration name that a PDB is protecting.
+	MigrationNameLabel string = "kubevirt.io/migrationName"
+	// This label describes which cluster node runs the virtual machine
+	// instance. Needed because with CRDs we can't use field selectors. Used on
+	// VirtualMachineInstance.
+	NodeNameLabel string = "kubevirt.io/nodeName"
+	// This label describes which cluster node runs the target Pod for a Virtual
+	// Machine Instance migration job. Needed because with CRDs we can't use field
+	// selectors. Used on VirtualMachineInstance.
+	MigrationTargetNodeNameLabel string = "kubevirt.io/migrationTargetNodeName"
+	// This annotation indicates that a migration is the result of an
+	// automated evacuation
+	EvacuationMigrationAnnotation string = "kubevirt.io/evacuationMigration"
+	// This annotation indicates that a migration is the result of an
+	// automated workload update
+	WorkloadUpdateMigrationAnnotation string = "kubevirt.io/workloadUpdateMigration"
+	// This label declares whether a particular node is available for
+	// scheduling virtual machine instances on it. Used on Node.
+	NodeSchedulable string = "kubevirt.io/schedulable"
+	// This annotation is regularly updated by virt-handler to help determine
+	// if a particular node is alive and hence should be available for new
+	// virtual machine instance scheduling. Used on Node.
+	VirtHandlerHeartbeat string = "kubevirt.io/heartbeat"
+	// This label indicates what launcher image a VMI is currently running with.
+	OutdatedLauncherImageLabel string = "kubevirt.io/outdatedLauncherImage"
+	// Namespace recommended by Kubernetes for commonly recognized labels
+	AppLabelPrefix = "app.kubernetes.io"
+	// This label is commonly used by 3rd party management tools to identify
+	// an application's name.
+	AppNameLabel = AppLabelPrefix + "/name"
+	// This label is commonly used by 3rd party management tools to identify
+	// an application's version.
+	AppVersionLabel = AppLabelPrefix + "/version"
+	// This label is commonly used by 3rd party management tools to identify
+	// a higher level application.
+	AppPartOfLabel = AppLabelPrefix + "/part-of"
+	// This label is commonly used by 3rd party management tools to identify
+	// the component this application is a part of.
+	AppComponentLabel = AppLabelPrefix + "/component"
+	// This label identifies each resource as part of KubeVirt
+	AppComponent = "kubevirt"
+	// This label will be set on all resources created by the operator
+	ManagedByLabel                 = AppLabelPrefix + "/managed-by"
+	ManagedByLabelOperatorValue    = "virt-operator"
+	ManagedByLabelOperatorOldValue = "kubevirt-operator"
+	// This annotation represents the kubevirt version for an install strategy configmap.
+	InstallStrategyVersionAnnotation = "kubevirt.io/install-strategy-version"
+	// This annotation represents the kubevirt registry used for an install strategy configmap.
+	InstallStrategyRegistryAnnotation = "kubevirt.io/install-strategy-registry"
+	// This annotation represents the kubevirt deployment identifier used for an install strategy configmap.
+	InstallStrategyIdentifierAnnotation = "kubevirt.io/install-strategy-identifier"
+	// This annotation shows the encoding used for the manifests in the Install Strategy ConfigMap.
+	InstallStrategyConfigMapEncoding = "kubevirt.io/install-strategy-cm-encoding"
+	// This annotation is a hash of all customizations that live under spec.CustomizeComponents
+	KubeVirtCustomizeComponentAnnotationHash = "kubevirt.io/customizer-identifier"
+	// This annotation represents the kubevirt generation that was used to create a resource
+	KubeVirtGenerationAnnotation = "kubevirt.io/generation"
+	// This annotation represents that this object is for temporary use during updates
+	EphemeralBackupObject = "kubevirt.io/ephemeral-backup-object"
+	// This annotation represents that the annotated object is for temporary use during pod/volume provisioning
+	EphemeralProvisioningObject string = "kubevirt.io/ephemeral-provisioning"
+
+	// This label indicates the object is a part of the install strategy retrieval process.
+	InstallStrategyLabel = "kubevirt.io/install-strategy"
+
+	// Set by virt-operator to coordinate component deletion
+	VirtOperatorComponentFinalizer string = "kubevirt.io/virtOperatorFinalizer"
+
+	// Set by the VMI controller to ensure VMIs are processed during deletion
+	VirtualMachineInstanceFinalizer string = "foregroundDeleteVirtualMachine"
+	// Set by the VM controller on VMIs to ensure VMIs are processed by the VM controller during deletion
+	VirtualMachineControllerFinalizer        string = "kubevirt.io/virtualMachineControllerFinalize"
+	VirtualMachineInstanceMigrationFinalizer string = "kubevirt.io/migrationJobFinalize"
+	CPUManager                               string = "cpumanager"
+	// This annotation is used to inject ignition data
+	// Used on VirtualMachineInstance.
+	IgnitionAnnotation           string = "kubevirt.io/ignitiondata"
+	PlacePCIDevicesOnRootComplex string = "kubevirt.io/placePCIDevicesOnRootComplex"
+
+	// This label represents supported cpu features on the node
+	CPUFeatureLabel = "cpu-feature.node.kubevirt.io/"
+	// This label represents supported cpu models on the node
+	CPUModelLabel                  = "cpu-model.node.kubevirt.io/"
+	SupportedHostModelMigrationCPU = "cpu-model-migration.node.kubevirt.io/"
+	CPUTimerLabel                  = "cpu-timer.node.kubevirt.io/"
+	// This label represents supported HyperV features on the node
+	HypervLabel = "hyperv.node.kubevirt.io/"
+	// This label represents the vendor of the cpu model on the node
+	CPUModelVendorLabel = "cpu-vendor.node.kubevirt.io/"
+
+	VirtIO = "virtio"
+
+	// This label represents the host model CPU name
+	HostModelCPULabel = "host-model-cpu.node.kubevirt.io/"
+	// This label represents the host model required features
+	HostModelRequiredFeaturesLabel = "host-model-required-features.node.kubevirt.io/"
+	NodeHostModelIsObsoleteLabel   = "node-labeller.kubevirt.io/obsolete-host-model"
+
+	LabellerSkipNodeAnnotation        = "node-labeller.kubevirt.io/skip-node"
+	VirtualMachineLabel               = AppLabel + "/vm"
+	MemfdMemoryBackend         string = "kubevirt.io/memfd"
+
+	MigrationSelectorLabel = "kubevirt.io/vmi-name"
+
+	// This annotation represents that the vmi is running with the nonroot implementation
+	DeprecatedNonRootVMIAnnotation = "kubevirt.io/nonroot"
+
+	// This annotation is used to keep the virt-launcher container alive when a VMI encounters a failure, for debugging purposes
+	KeepLauncherAfterFailureAnnotation string = "kubevirt.io/keep-launcher-alive-after-failure"
+
+	// MigrationTransportUnixAnnotation means that the VMI will be migrated using the unix URI
+	MigrationTransportUnixAnnotation string = "kubevirt.io/migrationTransportUnix"
+
+	// MigrationUnschedulablePodTimeoutSecondsAnnotation represents a custom timeout period used for unschedulable target pods
+	// This exists for functional testing
+	MigrationUnschedulablePodTimeoutSecondsAnnotation string = "kubevirt.io/migrationUnschedulablePodTimeoutSeconds"
+
+	// MigrationPendingPodTimeoutSecondsAnnotation represents a custom timeout period used for target pods stuck in pending for any reason
+	// This exists for functional testing
+	MigrationPendingPodTimeoutSecondsAnnotation string = "kubevirt.io/migrationPendingPodTimeoutSeconds"
+
+	// CustomLibvirtLogFiltersAnnotation can be used to customize libvirt log filters. An example value could be
+	// "3:remote 4:event 3:util.json 3:util.object 3:util.dbus 3:util.netlink 3:node_device 3:rpc 3:access 1:*".
+	// For more info: https://libvirt.org/kbase/debuglogs.html
+	CustomLibvirtLogFiltersAnnotation string = "kubevirt.io/libvirt-log-filters"
+
+	// RealtimeLabel marks the node as capable of running realtime workloads
+	RealtimeLabel string = "kubevirt.io/realtime"
+
+	// VirtualMachineUnpaused is a custom pod condition set for the virt-launcher pod.
+	// It's used as a readiness gate to prevent paused VMs from being marked as ready.
+	VirtualMachineUnpaused k8sv1.PodConditionType = "kubevirt.io/virtual-machine-unpaused"
+
+	// SEVLabel marks the node as capable of running workloads with SEV
+	SEVLabel string = "kubevirt.io/sev"
+
+	// InstancetypeAnnotation is the name of a VirtualMachineInstancetype
+	InstancetypeAnnotation string = "kubevirt.io/instancetype-name"
+
+	// ClusterInstancetypeAnnotation is the name of a VirtualMachineClusterInstancetype
+	ClusterInstancetypeAnnotation string = "kubevirt.io/cluster-instancetype-name"
+
+	// PreferenceAnnotation is the name of a VirtualMachinePreference
+	PreferenceAnnotation string = "kubevirt.io/preference-name"
+
+	// ClusterPreferenceAnnotation is the name of a VirtualMachineClusterPreference
+	ClusterPreferenceAnnotation string = "kubevirt.io/cluster-preference-name"
+
+	// VirtualMachinePoolRevisionName is used to store the name of the vmpool revision this object
+	// originated from.
+	VirtualMachinePoolRevisionName string = "kubevirt.io/vm-pool-revision-name"
+
+	// VirtualMachineNameLabel is the name of the Virtual Machine
+	VirtualMachineNameLabel string = "vm.kubevirt.io/name"
+
+	// PVCMemoryDumpAnnotation is the name of the memory dump representing the vm name,
+	// pvc name and the timestamp the memory dump was collected
+	PVCMemoryDumpAnnotation string = "kubevirt.io/memory-dump"
+)
+
+func NewVMI(name string, uid types.UID) *VirtualMachineInstance {
+	return &VirtualMachineInstance{
+		Spec: VirtualMachineInstanceSpec{},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			UID:       uid,
+			Namespace: k8sv1.NamespaceDefault,
+		},
+		Status: VirtualMachineInstanceStatus{},
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: GroupVersion.String(),
+			Kind:       VirtualMachineInstanceGroupVersionKind.Kind,
+		},
+	}
+}
+
+type SyncEvent string
+
+const (
+	Created                      SyncEvent = "Created"
+	Deleted                      SyncEvent = "Deleted"
+	PresetFailed                 SyncEvent = "PresetFailed"
+	Override                     SyncEvent = "Override"
+	Started                      SyncEvent = "Started"
+	ShuttingDown                 SyncEvent = "ShuttingDown"
+	Stopped                      SyncEvent = "Stopped"
+	PreparingTarget              SyncEvent = "PreparingTarget"
+	Migrating                    SyncEvent = "Migrating"
+	Migrated                     SyncEvent = "Migrated"
+	SyncFailed                   SyncEvent = "SyncFailed"
+	Resumed                      SyncEvent = "Resumed"
+	AccessCredentialsSyncFailed  SyncEvent = "AccessCredentialsSyncFailed"
+	AccessCredentialsSyncSuccess SyncEvent = "AccessCredentialsSyncSuccess"
+)
+
+func (s SyncEvent) String() string {
+	return string(s)
+}
+
+// TODO Namespace could be different, also store it somewhere in the domain, so that we can report deletes on handler startup properly
+func NewVMIReferenceFromName(name string) *VirtualMachineInstance {
+	return NewVMIReferenceFromNameWithNS(k8sv1.NamespaceDefault, name)
+}
+
+func NewVMIReferenceFromNameWithNS(namespace string, name string) *VirtualMachineInstance {
+	vmi := &VirtualMachineInstance{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+			SelfLink:  fmt.Sprintf("/apis/%s/namespaces/%s/virtualmachineinstances/%s", GroupVersion.String(), namespace, name),
+		},
+	}
+	vmi.SetGroupVersionKind(schema.GroupVersionKind{Group: GroupVersion.Group, Kind: "VirtualMachineInstance", Version: GroupVersion.Version})
+	return vmi
+}
+
+func NewVMIReferenceWithUUID(namespace string, name string, uuid types.UID) *VirtualMachineInstance {
+	vmi := NewVMIReferenceFromNameWithNS(namespace, name)
+	vmi.UID = uuid
+	return vmi
+}
+
+type VMISelector struct {
+	// Name of the VirtualMachineInstance to migrate
+	Name string `json:"name" valid:"required"`
+}
+
+func NewVMReferenceFromNameWithNS(namespace string, name string) *VirtualMachine {
+	vm := &VirtualMachine{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+			SelfLink:  fmt.Sprintf("/apis/%s/namespaces/%s/virtualmachines/%s", GroupVersion.String(), namespace, name),
+		},
+	}
+	vm.SetGroupVersionKind(schema.GroupVersionKind{Group: GroupVersion.Group, Kind: "VirtualMachine", Version: GroupVersion.Version})
+	return vm
+}
+
+// Given a VirtualMachineInstance, update all NodeSelectorTerms with anti-affinity for that VirtualMachineInstance's node.
+// This is useful for the case when a migration away from a node must occur.
+// This method returns the full Affinity structure updated with the anti-affinity terms.
+func UpdateAntiAffinityFromVMINode(pod *k8sv1.Pod, vmi *VirtualMachineInstance) *k8sv1.Affinity {
+	if pod.Spec.Affinity == nil {
+		pod.Spec.Affinity = &k8sv1.Affinity{}
+	}
+
+	if pod.Spec.Affinity.NodeAffinity == nil {
+		pod.Spec.Affinity.NodeAffinity = &k8sv1.NodeAffinity{}
+	}
+
+	if pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+		pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &k8sv1.NodeSelector{}
+	}
+
+	selector := pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution
+	terms := selector.NodeSelectorTerms
+
+	if len(terms) == 0 {
+		selector.NodeSelectorTerms = append(terms, k8sv1.NodeSelectorTerm{})
+		terms = selector.NodeSelectorTerms
+	}
+
+	for idx, term := range terms {
+		if term.MatchExpressions == nil {
+			term.MatchExpressions = []k8sv1.NodeSelectorRequirement{}
+		}
+
+		term.MatchExpressions = append(term.MatchExpressions, PrepareVMINodeAntiAffinitySelectorRequirement(vmi))
+		selector.NodeSelectorTerms[idx] = term
+	}
+
+	return pod.Spec.Affinity
+}
+
+// Given a VirtualMachineInstance, create a NodeSelectorRequirement with anti-affinity for that VirtualMachineInstance's node.
+// This is useful for the case when a migration away from a node must occur.
+func PrepareVMINodeAntiAffinitySelectorRequirement(vmi *VirtualMachineInstance) k8sv1.NodeSelectorRequirement {
+	return k8sv1.NodeSelectorRequirement{
+		Key:      "kubernetes.io/hostname",
+		Operator: k8sv1.NodeSelectorOpNotIn,
+		Values:   []string{vmi.Status.NodeName},
+	}
+}
+
+// VirtualMachineInstanceReplicaSet tries to ensure that a specified number of VirtualMachineInstance replicas are running at any time.
+//
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +genclient
+type VirtualMachineInstanceReplicaSet struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// Spec contains the specification of the VirtualMachineInstanceReplicaSet.
+	Spec VirtualMachineInstanceReplicaSetSpec `json:"spec" valid:"required"`
+	// Status is the high level overview of how the VirtualMachineInstanceReplicaSet is doing. It contains information available to controllers and users.
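+	// A controller might, for instance, report progress by comparing the spec
+	// and status counters (sketch; fields shown below, and real code should
+	// guard against a nil Replicas pointer):
+	//
+	//	done := rs.Status.ReadyReplicas == *rs.Spec.Replicas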
+	// +nullable
+	Status VirtualMachineInstanceReplicaSetStatus `json:"status,omitempty"`
+}
+
+// VirtualMachineInstanceReplicaSetList is a list of VirtualMachineInstanceReplicaSets
+//
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type VirtualMachineInstanceReplicaSetList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []VirtualMachineInstanceReplicaSet `json:"items"`
+}
+
+type VirtualMachineInstanceReplicaSetSpec struct {
+	// Number of desired pods. This is a pointer to distinguish between explicit
+	// zero and not specified. Defaults to 1.
+	// +optional
+	Replicas *int32 `json:"replicas,omitempty"`
+
+	// Label selector for pods. Existing ReplicaSets whose pods are
+	// selected by this will be the ones affected by this deployment.
+	Selector *metav1.LabelSelector `json:"selector" valid:"required"`
+
+	// Template describes the pods that will be created.
+	Template *VirtualMachineInstanceTemplateSpec `json:"template" valid:"required"`
+
+	// Indicates that the replica set is paused.
+	// +optional
+	Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"`
+}
+
+type VirtualMachineInstanceReplicaSetStatus struct {
+	// Total number of non-terminated pods targeted by this deployment (their labels match the selector).
+	// +optional
+	Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"`
+
+	// The number of ready replicas for this replica set.
+	// +optional
+	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
+
+	Conditions []VirtualMachineInstanceReplicaSetCondition `json:"conditions,omitempty" optional:"true"`
+
+	// Canonical form of the label selector for HPA which consumes it through the scale subresource.
+	LabelSelector string `json:"labelSelector,omitempty"`
+}
+
+type VirtualMachineInstanceReplicaSetCondition struct {
+	Type   VirtualMachineInstanceReplicaSetConditionType `json:"type"`
+	Status k8sv1.ConditionStatus                         `json:"status"`
+	// +nullable
+	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
+	// +nullable
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+	Reason             string      `json:"reason,omitempty"`
+	Message            string      `json:"message,omitempty"`
+}
+
+type VirtualMachineInstanceReplicaSetConditionType string
+
+const (
+	// VirtualMachineInstanceReplicaSetReplicaFailure is added in a replica set when one of its vmis
+	// fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors,
+	// etc., or deleted due to kubelet being down or finalizers failing.
+	VirtualMachineInstanceReplicaSetReplicaFailure VirtualMachineInstanceReplicaSetConditionType = "ReplicaFailure"
+
+	// VirtualMachineInstanceReplicaSetReplicaPaused is added in a replica set when the replica set got paused by the controller.
+	// After this condition was added, it is safe to remove or add vmis by hand and adjust the replica count by hand.
+	VirtualMachineInstanceReplicaSetReplicaPaused VirtualMachineInstanceReplicaSetConditionType = "ReplicaPaused"
+)
+
+type DataVolumeTemplateDummyStatus struct{}
+
+type DataVolumeTemplateSpec struct {
+	// TypeMeta only exists on DataVolumeTemplate for API backwards compatibility;
+	// this field is not used by our controllers and is a no-op.
+	// +nullable
+	metav1.TypeMeta `json:",inline"`
+	// +kubebuilder:pruning:PreserveUnknownFields
+	// +nullable
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// DataVolumeSpec contains the DataVolume specification.
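+	// A minimal sketch of a template that creates a blank 1Gi DataVolume
+	// (field names follow cdiv1.DataVolumeSpec; the size is illustrative):
+	//
+	//	spec:
+	//	  source:
+	//	    blank: {}
+	//	  pvc:
+	//	    accessModes: ["ReadWriteOnce"]
+	//	    resources:
+	//	      requests:
+	//	        storage: 1Gi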
+	Spec cdiv1.DataVolumeSpec `json:"spec"`
+
+	// DataVolumeTemplateDummyStatus is here simply for backwards compatibility with
+	// a previous API.
+	// +nullable
+	// +optional
+	Status *DataVolumeTemplateDummyStatus `json:"status,omitempty"`
+}
+
+type VirtualMachineInstanceTemplateSpec struct {
+	// +kubebuilder:pruning:PreserveUnknownFields
+	// +nullable
+	ObjectMeta metav1.ObjectMeta `json:"metadata,omitempty"`
+	// VirtualMachineInstance Spec contains the VirtualMachineInstance specification.
+	Spec VirtualMachineInstanceSpec `json:"spec,omitempty" valid:"required"`
+}
+
+// VirtualMachineInstanceMigration represents the object tracking a VMI's migration
+// to another host in the cluster
+//
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +genclient
+type VirtualMachineInstanceMigration struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	Spec              VirtualMachineInstanceMigrationSpec   `json:"spec" valid:"required"`
+	Status            VirtualMachineInstanceMigrationStatus `json:"status,omitempty"`
+}
+
+// VirtualMachineInstanceMigrationList is a list of VirtualMachineInstanceMigrations
+//
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type VirtualMachineInstanceMigrationList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []VirtualMachineInstanceMigration `json:"items"`
+}
+
+type VirtualMachineInstanceMigrationSpec struct {
+	// The name of the VMI to perform the migration on. The VMI must exist in the migration object's namespace
+	VMIName string `json:"vmiName,omitempty" valid:"required"`
+}
+
+// VirtualMachineInstanceMigrationPhaseTransitionTimestamp gives a timestamp in relation to when a phase is set on a migration
+type VirtualMachineInstanceMigrationPhaseTransitionTimestamp struct {
+	// Phase is the status of the VirtualMachineInstanceMigration in the Kubernetes world. It is not the VirtualMachineInstanceMigration status, but partially correlates to it.
+	Phase VirtualMachineInstanceMigrationPhase `json:"phase,omitempty"`
+	// PhaseTransitionTimestamp is the timestamp of when the phase change occurred
+	PhaseTransitionTimestamp metav1.Time `json:"phaseTransitionTimestamp,omitempty"`
+}
+
+// VirtualMachineInstanceMigrationStatus represents information pertaining to a VMI's migration.
+type VirtualMachineInstanceMigrationStatus struct {
+	Phase      VirtualMachineInstanceMigrationPhase       `json:"phase,omitempty"`
+	Conditions []VirtualMachineInstanceMigrationCondition `json:"conditions,omitempty"`
+	// PhaseTransitionTimestamps contains a timestamp for each phase change that occurred
+	// +listType=atomic
+	// +optional
+	PhaseTransitionTimestamps []VirtualMachineInstanceMigrationPhaseTransitionTimestamp `json:"phaseTransitionTimestamps,omitempty"`
+	// Represents the status of a live migration
+	MigrationState *VirtualMachineInstanceMigrationState `json:"migrationState,omitempty"`
+}
+
+// VirtualMachineInstanceMigrationPhase is a label for the condition of a VirtualMachineInstanceMigration at the current time.
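+// A watcher might simply poll until a final phase is reached, e.g. (sketch,
+// using the IsFinal helper defined above):
+//
+//	if m.IsFinal() { /* phase is MigrationSucceeded or MigrationFailed */ }
+//
+// The full phase progression is listed below.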
+type VirtualMachineInstanceMigrationPhase string
+
+// These are the valid migration phases
+const (
+	MigrationPhaseUnset VirtualMachineInstanceMigrationPhase = ""
+	// The migration is accepted by the system
+	MigrationPending VirtualMachineInstanceMigrationPhase = "Pending"
+	// The migration's target pod is being scheduled
+	MigrationScheduling VirtualMachineInstanceMigrationPhase = "Scheduling"
+	// The migration's target pod is running
+	MigrationScheduled VirtualMachineInstanceMigrationPhase = "Scheduled"
+	// The migration's target pod is being prepared for migration
+	MigrationPreparingTarget VirtualMachineInstanceMigrationPhase = "PreparingTarget"
+	// The migration's target pod is prepared and ready for migration
+	MigrationTargetReady VirtualMachineInstanceMigrationPhase = "TargetReady"
+	// The migration is in progress
+	MigrationRunning VirtualMachineInstanceMigrationPhase = "Running"
+	// The migration succeeded
+	MigrationSucceeded VirtualMachineInstanceMigrationPhase = "Succeeded"
+	// The migration failed
+	MigrationFailed VirtualMachineInstanceMigrationPhase = "Failed"
+)
+
+// Deprecated for removal in v2, please use VirtualMachineInstancetype and VirtualMachinePreference instead.
+//
+// VirtualMachineInstancePreset defines a VMI spec.domain to be applied to all VMIs that match the provided label selector
+// More info: https://kubevirt.io/user-guide/virtual_machines/presets/#overrides
+//
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +genclient
+type VirtualMachineInstancePreset struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// VirtualMachineInstance Spec contains the VirtualMachineInstance specification.
+	Spec VirtualMachineInstancePresetSpec `json:"spec,omitempty" valid:"required"`
+}
+
+// VirtualMachineInstancePresetList is a list of VirtualMachineInstancePresets
+//
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type VirtualMachineInstancePresetList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []VirtualMachineInstancePreset `json:"items"`
+}
+
+type VirtualMachineInstancePresetSpec struct {
+	// Selector is a label query over a set of VMIs.
+	// Required.
+	Selector metav1.LabelSelector `json:"selector"`
+	// Domain is the same object type as contained in VirtualMachineInstanceSpec
+	Domain *DomainSpec `json:"domain,omitempty"`
+}
+
+func NewVirtualMachinePreset(name string, selector metav1.LabelSelector) *VirtualMachineInstancePreset {
+	return &VirtualMachineInstancePreset{
+		Spec: VirtualMachineInstancePresetSpec{
+			Selector: selector,
+			Domain:   &DomainSpec{},
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: k8sv1.NamespaceDefault,
+		},
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: GroupVersion.String(),
+			Kind:       VirtualMachineInstancePresetGroupVersionKind.Kind,
+		},
+	}
+}
+
+// VirtualMachine handles the VirtualMachines that are not running
+// or are in a stopped state
+// The VirtualMachine contains the template to create the
+// VirtualMachineInstance. It also mirrors the running state of the created
+// VirtualMachineInstance in its status.
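+// Whether that VirtualMachineInstance exists is driven by Spec.Running or
+// Spec.RunStrategy; for example (sketch):
+//
+//	strategy, err := vm.RunStrategy() // RunStrategyHalted when neither field is set
+//
+// See the RunStrategy method below for the exact mapping.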
+//
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +genclient
+type VirtualMachine struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// Spec contains the specification of the VirtualMachineInstance created
+	Spec VirtualMachineSpec `json:"spec" valid:"required"`
+	// Status holds the current state of the controller and brief information
+	// about its associated VirtualMachineInstance
+	Status VirtualMachineStatus `json:"status,omitempty"`
+}
+
+// RunStrategy returns the current runStrategy for the VirtualMachine.
+// If vm.spec.running is set, it is mapped to a runStrategy:
+//
+//	false: RunStrategyHalted
+//	true:  RunStrategyAlways
+func (vm *VirtualMachine) RunStrategy() (VirtualMachineRunStrategy, error) {
+	if vm.Spec.Running != nil && vm.Spec.RunStrategy != nil {
+		return RunStrategyUnknown, fmt.Errorf("running and runstrategy are mutually exclusive")
+	}
+	runStrategy := RunStrategyHalted
+	if vm.Spec.Running != nil {
+		if *vm.Spec.Running {
+			runStrategy = RunStrategyAlways
+		}
+	} else if vm.Spec.RunStrategy != nil {
+		runStrategy = *vm.Spec.RunStrategy
+	}
+	return runStrategy, nil
+}
+
+// VirtualMachineList is a list of VirtualMachines
+//
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type VirtualMachineList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []VirtualMachine `json:"items"`
+}
+
+// VirtualMachineRunStrategy is a label for the requested VirtualMachineInstance Running State at the current time.
+type VirtualMachineRunStrategy string
+
+// These are the valid VMI run strategies
+const (
+	// Placeholder. Not a valid RunStrategy.
+	RunStrategyUnknown VirtualMachineRunStrategy = ""
+	// VMI should always be running.
+	RunStrategyAlways VirtualMachineRunStrategy = "Always"
+	// VMI should never be running.
+	RunStrategyHalted VirtualMachineRunStrategy = "Halted"
+	// VMI can be started/stopped using API endpoints.
+	RunStrategyManual VirtualMachineRunStrategy = "Manual"
+	// VMI will initially be running, and restarted if a failure occurs.
+	// It will not be restarted upon successful completion.
+	RunStrategyRerunOnFailure VirtualMachineRunStrategy = "RerunOnFailure"
+	// VMI will run once and not be restarted upon completion, regardless
+	// of whether the completion is of phase Failure or Success
+	RunStrategyOnce VirtualMachineRunStrategy = "Once"
+)
+
+// VirtualMachineSpec describes what the proper VirtualMachine
+// should look like
+type VirtualMachineSpec struct {
+	// Running controls whether the associated VirtualMachineInstance is created or not
+	// Mutually exclusive with RunStrategy
+	Running *bool `json:"running,omitempty" optional:"true"`
+
+	// RunStrategy indicates the requested running state of the VirtualMachineInstance
+	// mutually exclusive with Running
+	RunStrategy *VirtualMachineRunStrategy `json:"runStrategy,omitempty" optional:"true"`
+
+	// InstancetypeMatcher references an instancetype that is used to fill fields in Template
+	Instancetype *InstancetypeMatcher `json:"instancetype,omitempty" optional:"true"`
+
+	// PreferenceMatcher references a set of preferences that is used to fill fields in Template
+	Preference *PreferenceMatcher `json:"preference,omitempty" optional:"true"`
+
+	// Template is the direct specification of VirtualMachineInstance
+	Template *VirtualMachineInstanceTemplateSpec `json:"template"`
+
+	// dataVolumeTemplates is a list of dataVolumes that the VirtualMachineInstance template can reference.
+	// DataVolumes in this list are dynamically created for the VirtualMachine and are tied to the VirtualMachine's life-cycle.
+	DataVolumeTemplates []DataVolumeTemplateSpec `json:"dataVolumeTemplates,omitempty"`
+}
+
+// StateChangeRequestAction represents the existing state change requests that are possible
+type StateChangeRequestAction string
+
+// These are the currently defined state change requests
+const (
+	StartRequest StateChangeRequestAction = "Start"
+	StopRequest  StateChangeRequestAction = "Stop"
+)
+
+// VirtualMachinePrintableStatus is a human readable, high-level representation of the status of the virtual machine.
+type VirtualMachinePrintableStatus string
+
+// A list of statuses defined for virtual machines
+const (
+	// VirtualMachineStatusStopped indicates that the virtual machine is currently stopped and isn't expected to start.
+	VirtualMachineStatusStopped VirtualMachinePrintableStatus = "Stopped"
+	// VirtualMachineStatusProvisioning indicates that cluster resources associated with the virtual machine
+	// (e.g., DataVolumes) are being provisioned and prepared.
+	VirtualMachineStatusProvisioning VirtualMachinePrintableStatus = "Provisioning"
+	// VirtualMachineStatusStarting indicates that the virtual machine is being prepared for running.
+	VirtualMachineStatusStarting VirtualMachinePrintableStatus = "Starting"
+	// VirtualMachineStatusRunning indicates that the virtual machine is running.
+	VirtualMachineStatusRunning VirtualMachinePrintableStatus = "Running"
+	// VirtualMachineStatusPaused indicates that the virtual machine is paused.
+	VirtualMachineStatusPaused VirtualMachinePrintableStatus = "Paused"
+	// VirtualMachineStatusStopping indicates that the virtual machine is in the process of being stopped.
+	VirtualMachineStatusStopping VirtualMachinePrintableStatus = "Stopping"
+	// VirtualMachineStatusTerminating indicates that the virtual machine is in the process of deletion,
+	// as well as its associated resources (VirtualMachineInstance, DataVolumes, …).
+ VirtualMachineStatusTerminating VirtualMachinePrintableStatus = "Terminating" + // VirtualMachineStatusCrashLoopBackOff indicates that the virtual machine is currently in a crash loop waiting to be retried. + VirtualMachineStatusCrashLoopBackOff VirtualMachinePrintableStatus = "CrashLoopBackOff" + // VirtualMachineStatusMigrating indicates that the virtual machine is in the process of being migrated + // to another host. + VirtualMachineStatusMigrating VirtualMachinePrintableStatus = "Migrating" + // VirtualMachineStatusUnknown indicates that the state of the virtual machine could not be obtained, + // typically due to an error in communicating with the host on which it's running. + VirtualMachineStatusUnknown VirtualMachinePrintableStatus = "Unknown" + // VirtualMachineStatusUnschedulable indicates that an error has occurred while scheduling the virtual machine, + // e.g. due to unsatisfiable resource requests or unsatisfiable scheduling constraints. + VirtualMachineStatusUnschedulable VirtualMachinePrintableStatus = "ErrorUnschedulable" + // VirtualMachineStatusErrImagePull indicates that an error has occurred while pulling an image for + // a containerDisk VM volume. + VirtualMachineStatusErrImagePull VirtualMachinePrintableStatus = "ErrImagePull" + // VirtualMachineStatusImagePullBackOff indicates that an error has occurred while pulling an image for + // a containerDisk VM volume, and that kubelet is backing off before retrying. + VirtualMachineStatusImagePullBackOff VirtualMachinePrintableStatus = "ImagePullBackOff" + // VirtualMachineStatusPvcNotFound indicates that the virtual machine references a PVC volume which doesn't exist. + VirtualMachineStatusPvcNotFound VirtualMachinePrintableStatus = "ErrorPvcNotFound" + // VirtualMachineStatusDataVolumeError indicates that an error has been reported by one of the DataVolumes + // referenced by the virtual machine. + VirtualMachineStatusDataVolumeError VirtualMachinePrintableStatus = "DataVolumeError" + // VirtualMachineStatusWaitingForVolumeBinding indicates that some PersistentVolumeClaims backing + // the virtual machine volume are still not bound.
+ VirtualMachineStatusWaitingForVolumeBinding VirtualMachinePrintableStatus = "WaitingForVolumeBinding" +) + +// VirtualMachineStartFailure tracks VMIs which failed to transition successfully +// to running using the VM status +type VirtualMachineStartFailure struct { + ConsecutiveFailCount int `json:"consecutiveFailCount,omitempty"` + LastFailedVMIUID types.UID `json:"lastFailedVMIUID,omitempty"` + RetryAfterTimestamp *metav1.Time `json:"retryAfterTimestamp,omitempty"` +} + +// VirtualMachineStatus represents the status returned by the +// controller to describe how the VirtualMachine is doing +type VirtualMachineStatus struct { + // SnapshotInProgress is the name of the VirtualMachineSnapshot currently executing + SnapshotInProgress *string `json:"snapshotInProgress,omitempty"` + // RestoreInProgress is the name of the VirtualMachineRestore currently executing + RestoreInProgress *string `json:"restoreInProgress,omitempty"` + // Created indicates if the virtual machine is created in the cluster + Created bool `json:"created,omitempty"` + // Ready indicates if the virtual machine is running and ready + Ready bool `json:"ready,omitempty"` + // PrintableStatus is a human readable, high-level representation of the status of the virtual machine + PrintableStatus VirtualMachinePrintableStatus `json:"printableStatus,omitempty"` + // Conditions hold the state information of the VirtualMachine and its VirtualMachineInstance + Conditions []VirtualMachineCondition `json:"conditions,omitempty" optional:"true"` + // StateChangeRequests indicates a list of actions that should be taken on a VMI, + // e.g. stop a specific VMI then start a new one. + StateChangeRequests []VirtualMachineStateChangeRequest `json:"stateChangeRequests,omitempty" optional:"true"` + // VolumeRequests indicates a list of volumes to add to or remove from the VMI template and + // hotplug on an active running VMI. + // +listType=atomic + VolumeRequests []VirtualMachineVolumeRequest `json:"volumeRequests,omitempty" optional:"true"` + + // VolumeSnapshotStatuses indicates a list of statuses describing whether snapshotting is + // supported by each volume. + VolumeSnapshotStatuses []VolumeSnapshotStatus `json:"volumeSnapshotStatuses,omitempty" optional:"true"` + + // StartFailure tracks consecutive VMI startup failures for the purposes of + // crash loop backoffs + // +nullable + // +optional + StartFailure *VirtualMachineStartFailure `json:"startFailure,omitempty" optional:"true"` + + // MemoryDumpRequest tracks the memory dump request phase and info of getting a memory + // dump to the given pvc + // +nullable + // +optional + MemoryDumpRequest *VirtualMachineMemoryDumpRequest `json:"memoryDumpRequest,omitempty" optional:"true"` +} + +type VolumeSnapshotStatus struct { + // Volume name + Name string `json:"name"` + // True if the volume supports snapshotting + Enabled bool `json:"enabled"` + // Empty if snapshotting is enabled, contains the reason otherwise + Reason string `json:"reason,omitempty" optional:"true"` +} + +type VirtualMachineVolumeRequest struct { + // AddVolumeOptions when set indicates a volume should be added. The details + // within this field specify how to add the volume + AddVolumeOptions *AddVolumeOptions `json:"addVolumeOptions,omitempty" optional:"true"` + // RemoveVolumeOptions when set indicates a volume should be removed.
The details + // within this field specify how to remove the volume + RemoveVolumeOptions *RemoveVolumeOptions `json:"removeVolumeOptions,omitempty" optional:"true"` +} + +type VirtualMachineStateChangeRequest struct { + // Indicates the type of action that is requested, e.g. Start or Stop + Action StateChangeRequestAction `json:"action"` + // Provides additional data in order to perform the Action + Data map[string]string `json:"data,omitempty" optional:"true"` + // Indicates the UUID of an existing Virtual Machine Instance that this change request applies to -- if applicable + UID *types.UID `json:"uid,omitempty" optional:"true" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` +} + +// VirtualMachineCondition represents the state of VirtualMachine +type VirtualMachineCondition struct { + Type VirtualMachineConditionType `json:"type"` + Status k8sv1.ConditionStatus `json:"status"` + // +nullable + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` + // +nullable + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + Reason string `json:"reason,omitempty"` + Message string `json:"message,omitempty"` +} + +type VirtualMachineConditionType string + +const ( + // VirtualMachineFailure is added in a virtual machine when its vmi + // fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors, + // etc., or deleted due to the kubelet being down or finalizers failing. + VirtualMachineFailure VirtualMachineConditionType = "Failure" + + // VirtualMachineReady is copied to the virtual machine from its vmi + VirtualMachineReady VirtualMachineConditionType = "Ready" + + // VirtualMachinePaused is added in a virtual machine when its vmi + // signals with its own condition that it is paused. + VirtualMachinePaused VirtualMachineConditionType = "Paused" +) + +type HostDiskType string + +const ( + // if the disk does not exist at the given path, + // a disk image will be created there + HostDiskExistsOrCreate HostDiskType = "DiskOrCreate" + // a disk image must exist at the given disk path + HostDiskExists HostDiskType = "Disk" +) + +type NetworkInterfaceType string + +const ( + // Virtual machine instance bridge interface + BridgeInterface NetworkInterfaceType = "bridge" + // Virtual machine instance slirp interface + SlirpInterface NetworkInterfaceType = "slirp" + // Virtual machine instance masquerade interface + MasqueradeInterface NetworkInterfaceType = "masquerade" + // Virtual machine instance passt interface + PasstInterface NetworkInterfaceType = "passt" +) + +type DriverCache string + +type DriverIO string + +const ( + // CacheNone - I/O from the guest is not cached on the host, but may be kept in a writeback disk cache. + CacheNone DriverCache = "none" + // CacheWriteThrough - I/O from the guest is cached on the host but written through to the physical medium. + CacheWriteThrough DriverCache = "writethrough" + // CacheWriteBack - I/O from the guest is cached on the host. + CacheWriteBack DriverCache = "writeback" + + // IOThreads - User mode based threads with a shared lock that perform I/O tasks. Can impact performance but offers + // more predictable behaviour. This method also takes fewer CPU cycles to submit I/O requests. + IOThreads DriverIO = "threads" + // IONative - Kernel native I/O tasks (AIO) offer better performance but can block the VM if the file is not fully + // allocated, so this method is recommended only when the backing file/disk/etc is fully preallocated.
+ IONative DriverIO = "native" + // IODefault - Fallback to the default value from the kernel. With recent kernel versions (for example RHEL-7) the + // default is AIO. + IODefault DriverIO = "default" +) + +// Handler defines a specific action that should be taken +// TODO: pass structured data to these actions, and document that data here. +type Handler struct { + // One and only one of the following should be specified. + // Exec specifies the action to take; it will be executed on the guest through the qemu-guest-agent. + // If the guest agent is not available, this probe will fail. + // +optional + Exec *k8sv1.ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"` + // GuestAgentPing contacts the qemu-guest-agent for availability checks. + // +optional + GuestAgentPing *GuestAgentPing `json:"guestAgentPing,omitempty"` + // HTTPGet specifies the http request to perform. + // +optional + HTTPGet *k8sv1.HTTPGetAction `json:"httpGet,omitempty"` + // TCPSocket specifies an action involving a TCP port. + // TCP hooks not yet supported + // TODO: implement a realistic TCP lifecycle hook + // +optional + TCPSocket *k8sv1.TCPSocketAction `json:"tcpSocket,omitempty"` +} + +// Probe describes a health check to be performed against a VirtualMachineInstance to determine whether it is +// alive or ready to receive traffic. +type Probe struct { + // The action taken to determine the health of a VirtualMachineInstance + Handler `json:",inline"` + // Number of seconds after the VirtualMachineInstance has started before liveness probes are initiated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty"` + // Number of seconds after which the probe times out. + // For exec probes the timeout fails the probe but does not terminate the command running on the guest. + // This means a blocking command can result in an increasing load on the guest. + // A small buffer will be added to the resulting workload exec probe to compensate for delays + // caused by the qemu guest exec mechanism. + // Defaults to 1 second. Minimum value is 1. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + TimeoutSeconds int32 `json:"timeoutSeconds,omitempty"` + // How often (in seconds) to perform the probe. + // Defaults to 10 seconds. Minimum value is 1. + // +optional + PeriodSeconds int32 `json:"periodSeconds,omitempty"` + // Minimum consecutive successes for the probe to be considered successful after having failed. + // Defaults to 1. Must be 1 for liveness. Minimum value is 1. + // +optional + SuccessThreshold int32 `json:"successThreshold,omitempty"` + // Minimum consecutive failures for the probe to be considered failed after having succeeded. + // Defaults to 3. Minimum value is 1.
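+ // For example, a readiness probe that polls an HTTP endpoint in the guest + // (the port is illustrative; field names are the json tags of this struct): + // + //  readinessProbe: + //    httpGet: + //      port: 8080 + //    periodSeconds: 10 + //    failureThreshold: 3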
+ // +optional + FailureThreshold int32 `json:"failureThreshold,omitempty"` +} + +// KubeVirt represents the object deploying all KubeVirt resources +// +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +genclient +type KubeVirt struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec KubeVirtSpec `json:"spec" valid:"required"` + Status KubeVirtStatus `json:"status,omitempty"` +} + +// KubeVirtList is a list of KubeVirts +// +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type KubeVirtList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []KubeVirt `json:"items"` +} + +type KubeVirtSelfSignConfiguration struct { + // Deprecated. Use CA.Duration instead + CARotateInterval *metav1.Duration `json:"caRotateInterval,omitempty"` + // Deprecated. Use Server.Duration instead + CertRotateInterval *metav1.Duration `json:"certRotateInterval,omitempty"` + // Deprecated. Use CA.Duration and CA.RenewBefore instead + CAOverlapInterval *metav1.Duration `json:"caOverlapInterval,omitempty"` + + // CA configuration + // CA certs are kept in the CA bundle as long as they are valid + CA *CertConfig `json:"ca,omitempty"` + + // Server configuration + // Certs are rotated and discarded + Server *CertConfig `json:"server,omitempty"` +} + +// CertConfig contains the tunables for TLS certificates +type CertConfig struct { + // The requested 'duration' (i.e. lifetime) of the Certificate. + Duration *metav1.Duration `json:"duration,omitempty"` + + // The amount of time before the currently issued certificate's "notAfter" + // time that we will begin to attempt to renew the certificate. + RenewBefore *metav1.Duration `json:"renewBefore,omitempty"` +} + +type KubeVirtCertificateRotateStrategy struct { + SelfSigned *KubeVirtSelfSignConfiguration `json:"selfSigned,omitempty"` +} + +type WorkloadUpdateMethod string + +const ( + // WorkloadUpdateMethodLiveMigrate allows VMIs which are capable of being + // migrated to automatically migrate during automated workload updates. + WorkloadUpdateMethodLiveMigrate WorkloadUpdateMethod = "LiveMigrate" + // WorkloadUpdateMethodEvict results in a VMI's pod being evicted. Unless the + // pod has a pod disruption budget allocated, the eviction will usually result in + // the VMI being shutdown. + // Depending on whether a VMI is backed by a VM or not, this will either result + // in a restart of the VM by rescheduling a new VMI, or the shutdown via eviction + // of a standalone VMI object. + WorkloadUpdateMethodEvict WorkloadUpdateMethod = "Evict" +) + +// KubeVirtWorkloadUpdateStrategy defines options related to updating a KubeVirt install +type KubeVirtWorkloadUpdateStrategy struct { + // WorkloadUpdateMethods defines the methods that can be used to disrupt workloads + // during automated workload updates. + // When multiple methods are present, the least disruptive method takes + // precedence over more disruptive methods. 
For example, if both the LiveMigrate and Evict + // methods are listed, only VMs which are not live migratable will be restarted/shut down + // + // An empty list defaults to no automated workload updating + // + // +listType=atomic + // +optional + WorkloadUpdateMethods []WorkloadUpdateMethod `json:"workloadUpdateMethods,omitempty"` + + // BatchEvictionSize represents the number of VMIs that can be force updated per + // BatchEvictionInterval interval + // + // Defaults to 10 + // + // +optional + BatchEvictionSize *int `json:"batchEvictionSize,omitempty"` + + // BatchEvictionInterval represents the interval to wait before issuing the next + // batch of shutdowns + // + // Defaults to 1 minute + // + // +optional + BatchEvictionInterval *metav1.Duration `json:"batchEvictionInterval,omitempty"` +} + +type KubeVirtSpec struct { + // The image tag to use for the container images installed. + // Defaults to the same tag as the operator's container image. + ImageTag string `json:"imageTag,omitempty"` + // The image registry to pull the container images from + // Defaults to the same registry the operator's container image is pulled from. + ImageRegistry string `json:"imageRegistry,omitempty"` + + // The ImagePullPolicy to use. + ImagePullPolicy k8sv1.PullPolicy `json:"imagePullPolicy,omitempty" valid:"required"` + + // The imagePullSecrets to pull the container images from + // Defaults to none + // +listType=atomic + ImagePullSecrets []k8sv1.LocalObjectReference `json:"imagePullSecrets,omitempty"` + + // The namespace Prometheus is deployed in + // Defaults to openshift-monitor + MonitorNamespace string `json:"monitorNamespace,omitempty"` + + // The namespace the service monitor will be deployed in + // When ServiceMonitorNamespace is set, then we'll install the service monitor object in that namespace + // otherwise we will use the monitoring namespace. + ServiceMonitorNamespace string `json:"serviceMonitorNamespace,omitempty"` + + // The name of the Prometheus service account that needs read-access to KubeVirt endpoints + // Defaults to prometheus-k8s + MonitorAccount string `json:"monitorAccount,omitempty"` + + // WorkloadUpdateStrategy defines at the cluster level how to handle + // automated workload updates + WorkloadUpdateStrategy KubeVirtWorkloadUpdateStrategy `json:"workloadUpdateStrategy,omitempty"` + + // Specifies if kubevirt can be deleted if workloads are still present. + // This is mainly a precaution to avoid accidental data loss + UninstallStrategy KubeVirtUninstallStrategy `json:"uninstallStrategy,omitempty"` + + CertificateRotationStrategy KubeVirtCertificateRotateStrategy `json:"certificateRotateStrategy,omitempty"` + + // Designate the apps.kubevirt.io/version label for KubeVirt components. + // Useful if KubeVirt is included as part of a product. + // If ProductVersion is not specified, KubeVirt's version will be used. + ProductVersion string `json:"productVersion,omitempty"` + + // Designate the apps.kubevirt.io/part-of label for KubeVirt components. + // Useful if KubeVirt is included as part of a product. + // If ProductName is not specified, the part-of label will be omitted. + ProductName string `json:"productName,omitempty"` + + // Designate the apps.kubevirt.io/component label for KubeVirt components. + // Useful if KubeVirt is included as part of a product. + // If ProductComponent is not specified, the component label default value is kubevirt. + ProductComponent string `json:"productComponent,omitempty"` + + // holds kubevirt configurations.
+ // same as the virt-configMap + Configuration KubeVirtConfiguration `json:"configuration,omitempty"` + + // selectors and tolerations that should apply to KubeVirt infrastructure components + // +optional + Infra *ComponentConfig `json:"infra,omitempty"` + + // selectors and tolerations that should apply to KubeVirt workloads + // +optional + Workloads *ComponentConfig `json:"workloads,omitempty"` + + CustomizeComponents CustomizeComponents `json:"customizeComponents,omitempty"` +} + +type CustomizeComponents struct { + // +listType=atomic + Patches []CustomizeComponentsPatch `json:"patches,omitempty"` + + // Configure the value used for deployment and daemonset resources + Flags *Flags `json:"flags,omitempty"` +} + +// Flags will create a patch that will replace all flags for the container's +// command field. The only flags that will be used are those defined. There are no +// guarantees around forward/backward compatibility. If set incorrectly, the +// resource will error when rolled out until the flags are updated. +type Flags struct { + API map[string]string `json:"api,omitempty"` + Controller map[string]string `json:"controller,omitempty"` + Handler map[string]string `json:"handler,omitempty"` +} + +type CustomizeComponentsPatch struct { + // +kubebuilder:validation:MinLength=1 + ResourceName string `json:"resourceName"` + // +kubebuilder:validation:MinLength=1 + ResourceType string `json:"resourceType"` + Patch string `json:"patch"` + Type PatchType `json:"type"` +} + +type PatchType string + +const ( + JSONPatchType PatchType = "json" + MergePatchType PatchType = "merge" + StrategicMergePatchType PatchType = "strategic" +) + +type KubeVirtUninstallStrategy string + +const ( + KubeVirtUninstallStrategyRemoveWorkloads KubeVirtUninstallStrategy = "RemoveWorkloads" + KubeVirtUninstallStrategyBlockUninstallIfWorkloadsExist KubeVirtUninstallStrategy = "BlockUninstallIfWorkloadsExist" +) + +// GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. +type GenerationStatus struct { + // group is the group of the thing you're tracking + Group string `json:"group"` + // resource is the resource type of the thing you're tracking + Resource string `json:"resource"` + // namespace is where the thing you're tracking is + // +optional + Namespace string `json:"namespace,omitempty" optional:"true"` + // name is the name of the thing you're tracking + Name string `json:"name"` + // lastGeneration is the last generation of the workload controller involved + LastGeneration int64 `json:"lastGeneration"` + // hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + // +optional + Hash string `json:"hash,omitempty" optional:"true"` +} + +// KubeVirtStatus represents information pertaining to a KubeVirt deployment.
+type KubeVirtStatus struct { + Phase KubeVirtPhase `json:"phase,omitempty"` + Conditions []KubeVirtCondition `json:"conditions,omitempty" optional:"true"` + OperatorVersion string `json:"operatorVersion,omitempty" optional:"true"` + TargetKubeVirtRegistry string `json:"targetKubeVirtRegistry,omitempty" optional:"true"` + TargetKubeVirtVersion string `json:"targetKubeVirtVersion,omitempty" optional:"true"` + TargetDeploymentConfig string `json:"targetDeploymentConfig,omitempty" optional:"true"` + TargetDeploymentID string `json:"targetDeploymentID,omitempty" optional:"true"` + ObservedKubeVirtRegistry string `json:"observedKubeVirtRegistry,omitempty" optional:"true"` + ObservedKubeVirtVersion string `json:"observedKubeVirtVersion,omitempty" optional:"true"` + ObservedDeploymentConfig string `json:"observedDeploymentConfig,omitempty" optional:"true"` + ObservedDeploymentID string `json:"observedDeploymentID,omitempty" optional:"true"` + OutdatedVirtualMachineInstanceWorkloads *int `json:"outdatedVirtualMachineInstanceWorkloads,omitempty" optional:"true"` + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` + // +listType=atomic + Generations []GenerationStatus `json:"generations,omitempty" optional:"true"` +} + +// KubeVirtPhase is a label for the phase of a KubeVirt deployment at the current time. +type KubeVirtPhase string + +// These are the valid KubeVirt deployment phases +const ( + // The deployment is processing + KubeVirtPhaseDeploying KubeVirtPhase = "Deploying" + // The deployment succeeded + KubeVirtPhaseDeployed KubeVirtPhase = "Deployed" + // The deletion is processing + KubeVirtPhaseDeleting KubeVirtPhase = "Deleting" + // The deletion succeeded + KubeVirtPhaseDeleted KubeVirtPhase = "Deleted" +) + +// KubeVirtCondition represents a condition of a KubeVirt deployment +type KubeVirtCondition struct { + Type KubeVirtConditionType `json:"type"` + Status k8sv1.ConditionStatus `json:"status"` + // +optional + // +nullable + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` + // +optional + // +nullable + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + Reason string `json:"reason,omitempty"` + Message string `json:"message,omitempty"` +} + +type KubeVirtConditionType string + +// These are the valid KubeVirt condition types +const ( + // Whether the deployment or deletion was successful (only used if false) + KubeVirtConditionSynchronized KubeVirtConditionType = "Synchronized" + // Whether all resources were created and up-to-date + KubeVirtConditionCreated KubeVirtConditionType = "Created" + + // Conditions for HCO, see https://github.com/kubevirt/hyperconverged-cluster-operator/blob/master/docs/conditions.md + // Whether KubeVirt is functional and available in the cluster. + KubeVirtConditionAvailable KubeVirtConditionType = "Available" + // Whether the operator is actively making changes to KubeVirt + KubeVirtConditionProgressing KubeVirtConditionType = "Progressing" + // Whether KubeVirt is not functioning completely + KubeVirtConditionDegraded KubeVirtConditionType = "Degraded" +) + +const ( + EvictionStrategyNone EvictionStrategy = "None" + EvictionStrategyLiveMigrate EvictionStrategy = "LiveMigrate" + EvictionStrategyExternal EvictionStrategy = "External" +) + +// RestartOptions may be provided when deleting an API object. +type RestartOptions struct { + metav1.TypeMeta `json:",inline"` + + // The duration in seconds before the object should be force-restarted. Value must be a non-negative integer.
+ // A value of zero indicates restart immediately. If this value is nil, the default grace period for deletion of the corresponding VMI for the + // specified type will be used to determine how much time to give the VMI to restart. + // Defaults to a per object value if not specified. Zero means restart immediately. + // Allowed Values: nil and 0 + // +optional + GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=gracePeriodSeconds"` + + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + // +optional + // +listType=atomic + DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,2,rep,name=dryRun"` +} + +// StartOptions may be provided on start request. +type StartOptions struct { + metav1.TypeMeta `json:",inline"` + + // Indicates that the VM will be started in a paused state. + // +optional + Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + // +optional + // +listType=atomic + DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,5,rep,name=dryRun"` +} + +// PauseOptions may be provided on pause request. +type PauseOptions struct { + metav1.TypeMeta `json:",inline"` + + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + // +optional + // +listType=atomic + DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` +} + +// UnpauseOptions may be provided on unpause request. +type UnpauseOptions struct { + metav1.TypeMeta `json:",inline"` + + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + // +optional + // +listType=atomic + DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` +} + +const ( + StartRequestDataPausedKey string = "paused" + StartRequestDataPausedTrue string = "true" +) + +// StopOptions may be provided when deleting an API object. +type StopOptions struct { + metav1.TypeMeta `json:",inline"` + + // this updates the VMI's terminationGracePeriodSeconds during shutdown + // +optional + GracePeriod *int64 `json:"gracePeriod,omitempty" protobuf:"varint,1,opt,name=gracePeriod"` + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + // +optional + // +listType=atomic + DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,2,rep,name=dryRun"` +} + +// MigrateOptions may be provided on migrate request.
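+// As with the other option types above, a server-side dry run can be requested by setting: +// +//  dryRun: +//  - All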
+type MigrateOptions struct { + metav1.TypeMeta `json:",inline"` + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + // +optional + // +listType=atomic + DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` +} + +// VirtualMachineInstanceGuestAgentInfo represents information from the installed guest agent +// +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type VirtualMachineInstanceGuestAgentInfo struct { + metav1.TypeMeta `json:",inline"` + // GAVersion is the version of the currently installed guest agent + GAVersion string `json:"guestAgentVersion,omitempty"` + // SupportedCommands is the list of commands the guest agent supports + // +listType=atomic + SupportedCommands []GuestAgentCommandInfo `json:"supportedCommands,omitempty"` + // Hostname represents the FQDN of the guest + Hostname string `json:"hostname,omitempty"` + // OS contains the guest operating system information + OS VirtualMachineInstanceGuestOSInfo `json:"os,omitempty"` + // Timezone is the guest OS's current timezone + Timezone string `json:"timezone,omitempty"` + // UserList is a list of active guest OS users + UserList []VirtualMachineInstanceGuestOSUser `json:"userList,omitempty"` + // FSInfo is the guest OS filesystem information containing the disk mapping and disk mounts with usage + FSInfo VirtualMachineInstanceFileSystemInfo `json:"fsInfo,omitempty"` + // FSFreezeStatus is the state of the fs of the guest + // it can be either frozen or thawed + FSFreezeStatus string `json:"fsFreezeStatus,omitempty"` +} + +// GuestAgentCommandInfo describes a command that the QEMU guest agent supports +type GuestAgentCommandInfo struct { + Name string `json:"name"` + Enabled bool `json:"enabled,omitempty"` +} + +// VirtualMachineInstanceGuestOSUserList comprises the list of all active users on the guest machine +// +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type VirtualMachineInstanceGuestOSUserList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VirtualMachineInstanceGuestOSUser `json:"items"` +} + +// VirtualMachineInstanceGuestOSUser is a single user of the guest OS +type VirtualMachineInstanceGuestOSUser struct { + UserName string `json:"userName"` + Domain string `json:"domain,omitempty"` + LoginTime float64 `json:"loginTime,omitempty"` +} + +// VirtualMachineInstanceFileSystemInfo represents information regarding a single guest OS filesystem +type VirtualMachineInstanceFileSystemInfo struct { + Filesystems []VirtualMachineInstanceFileSystem `json:"disks"` +} + +// VirtualMachineInstanceFileSystemList comprises the list of all filesystems on the guest machine +// +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type VirtualMachineInstanceFileSystemList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []VirtualMachineInstanceFileSystem `json:"items"` +} + +// VirtualMachineInstanceFileSystem represents a guest OS disk +type VirtualMachineInstanceFileSystem struct { + DiskName string `json:"diskName"` + MountPoint string `json:"mountPoint"` + FileSystemType string `json:"fileSystemType"` + UsedBytes int `json:"usedBytes"` + TotalBytes int `json:"totalBytes"` +} + +// FreezeUnfreezeTimeout represents the time after which an unfreeze will be triggered if the guest was not unfrozen by an unfreeze command +type
FreezeUnfreezeTimeout struct { + UnfreezeTimeout *metav1.Duration `json:"unfreezeTimeout"` +} + +// VirtualMachineMemoryDumpRequest represents the memory dump request phase and info +type VirtualMachineMemoryDumpRequest struct { + // ClaimName is the name of the pvc that will contain the memory dump + ClaimName string `json:"claimName"` + // Phase represents the memory dump phase + Phase MemoryDumpPhase `json:"phase"` + // Remove represents a request to dissociate the memory dump pvc + // +optional + Remove bool `json:"remove,omitempty"` + // StartTimestamp represents the time the memory dump started + // +optional + StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"` + // EndTimestamp represents the time the memory dump was completed + // +optional + EndTimestamp *metav1.Time `json:"endTimestamp,omitempty"` + // FileName represents the name of the output file + // +optional + FileName *string `json:"fileName,omitempty"` + // Message is a detailed message about a failure of the memory dump + // +optional + Message string `json:"message,omitempty"` +} + +type MemoryDumpPhase string + +const ( + // The memory dump pvc is being associated + MemoryDumpAssociating MemoryDumpPhase = "Associating" + // The memory dump is in progress + MemoryDumpInProgress MemoryDumpPhase = "InProgress" + // The memory dump is being unmounted + MemoryDumpUnmounting MemoryDumpPhase = "Unmounting" + // The memory dump is completed + MemoryDumpCompleted MemoryDumpPhase = "Completed" + // The memory dump is being unbound + MemoryDumpDissociating MemoryDumpPhase = "Dissociating" + // The memory dump failed + MemoryDumpFailed MemoryDumpPhase = "Failed" +) + +// AddVolumeOptions is provided when dynamically hot plugging a volume and disk +type AddVolumeOptions struct { + // Name represents the name that will be used to map the + // disk to the corresponding volume. This overrides any name + // set inside the Disk struct itself. + Name string `json:"name"` + // Disk represents the hotplug disk that will be plugged into the running VMI + Disk *Disk `json:"disk"` + // VolumeSource represents the source of the volume to map to the disk. + VolumeSource *HotplugVolumeSource `json:"volumeSource"` + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + // +optional + // +listType=atomic + DryRun []string `json:"dryRun,omitempty"` +} + +type ScreenshotOptions struct { + MoveCursor bool `json:"moveCursor"` +} + +type VSOCKOptions struct { + TargetPort uint32 `json:"targetPort"` + UseTLS *bool `json:"useTLS,omitempty"` +} + +// RemoveVolumeOptions is provided when dynamically hot unplugging a volume and disk +type RemoveVolumeOptions struct { + // Name represents the name that maps to both the disk and volume that + // should be removed + Name string `json:"name"` + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + // +optional + // +listType=atomic + DryRun []string `json:"dryRun,omitempty"` +} + +type TokenBucketRateLimiter struct { + // QPS indicates the maximum QPS to the apiserver from this client.
+ // If it's zero, the component default will be used + QPS float32 `json:"qps"` + + // Maximum burst for throttle. + // If it's zero, the component default will be used + Burst int `json:"burst"` +} + +type RateLimiter struct { + TokenBucketRateLimiter *TokenBucketRateLimiter `json:"tokenBucketRateLimiter,omitempty"` +} + +// RESTClientConfiguration allows configuring certain aspects of the k8s rest client. +type RESTClientConfiguration struct { + // RateLimiter allows selecting and configuring different rate limiters for the k8s client. + RateLimiter *RateLimiter `json:"rateLimiter,omitempty"` +} + +// ReloadableComponentConfiguration holds all generic k8s configuration options which can +// be reloaded by components without requiring a restart. +type ReloadableComponentConfiguration struct { + // RestClient can be used to tune certain aspects of the k8s client in use. + RestClient *RESTClientConfiguration `json:"restClient,omitempty"` +} + +// KubeVirtConfiguration holds all kubevirt configurations +type KubeVirtConfiguration struct { + CPUModel string `json:"cpuModel,omitempty"` + CPURequest *resource.Quantity `json:"cpuRequest,omitempty"` + DeveloperConfiguration *DeveloperConfiguration `json:"developerConfiguration,omitempty"` + EmulatedMachines []string `json:"emulatedMachines,omitempty"` + ImagePullPolicy k8sv1.PullPolicy `json:"imagePullPolicy,omitempty"` + MigrationConfiguration *MigrationConfiguration `json:"migrations,omitempty"` + MachineType string `json:"machineType,omitempty"` + NetworkConfiguration *NetworkConfiguration `json:"network,omitempty"` + OVMFPath string `json:"ovmfPath,omitempty"` + SELinuxLauncherType string `json:"selinuxLauncherType,omitempty"` + DefaultRuntimeClass string `json:"defaultRuntimeClass,omitempty"` + SMBIOSConfig *SMBiosConfiguration `json:"smbios,omitempty"` + + // EvictionStrategy defines at the cluster level if the VirtualMachineInstance should be + // migrated instead of shut-off in case of a node drain. If the VirtualMachineInstance specific + // field is set it overrides the cluster level one.
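+ // For example: + // + //  configuration: + //    evictionStrategy: LiveMigrate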
+ EvictionStrategy *EvictionStrategy `json:"evictionStrategy,omitempty"` + + // Deprecated. + SupportedGuestAgentVersions []string `json:"supportedGuestAgentVersions,omitempty"` + MemBalloonStatsPeriod *uint32 `json:"memBalloonStatsPeriod,omitempty"` + PermittedHostDevices *PermittedHostDevices `json:"permittedHostDevices,omitempty"` + MediatedDevicesConfiguration *MediatedDevicesConfiguration `json:"mediatedDevicesConfiguration,omitempty"` + MinCPUModel string `json:"minCPUModel,omitempty"` + ObsoleteCPUModels map[string]bool `json:"obsoleteCPUModels,omitempty"` + VirtualMachineInstancesPerNode *int `json:"virtualMachineInstancesPerNode,omitempty"` + APIConfiguration *ReloadableComponentConfiguration `json:"apiConfiguration,omitempty"` + WebhookConfiguration *ReloadableComponentConfiguration `json:"webhookConfiguration,omitempty"` + ControllerConfiguration *ReloadableComponentConfiguration `json:"controllerConfiguration,omitempty"` + HandlerConfiguration *ReloadableComponentConfiguration `json:"handlerConfiguration,omitempty"` + TLSConfiguration *TLSConfiguration `json:"tlsConfiguration,omitempty"` + SeccompConfiguration *SeccompConfiguration `json:"seccompConfiguration,omitempty"` +} + +type SMBiosConfiguration struct { + Manufacturer string `json:"manufacturer,omitempty"` + Product string `json:"product,omitempty"` + Version string `json:"version,omitempty"` + Sku string `json:"sku,omitempty"` + Family string `json:"family,omitempty"` +} + +type TLSProtocolVersion string + +const ( + // VersionTLS10 is version 1.0 of the TLS security protocol. + VersionTLS10 TLSProtocolVersion = "VersionTLS10" + // VersionTLS11 is version 1.1 of the TLS security protocol. + VersionTLS11 TLSProtocolVersion = "VersionTLS11" + // VersionTLS12 is version 1.2 of the TLS security protocol. + VersionTLS12 TLSProtocolVersion = "VersionTLS12" + // VersionTLS13 is version 1.3 of the TLS security protocol. + VersionTLS13 TLSProtocolVersion = "VersionTLS13" +) + +type CustomProfile struct { + LocalhostProfile *string `json:"localhostProfile,omitempty"` + RuntimeDefaultProfile bool `json:"runtimeDefaultProfile,omitempty"` +} + +type VirtualMachineInstanceProfile struct { + // CustomProfile allows requesting an arbitrary profile for virt-launcher + CustomProfile *CustomProfile `json:"customProfile,omitempty"` +} + +// SeccompConfiguration holds Seccomp configuration for Kubevirt components +type SeccompConfiguration struct { + // VirtualMachineInstanceProfile defines what profile should be used with virt-launcher. Defaults to none + VirtualMachineInstanceProfile *VirtualMachineInstanceProfile `json:"virtualMachineInstanceProfile,omitempty"` +} + +// TLSConfiguration holds TLS options +type TLSConfiguration struct { + // MinTLSVersion is a way to specify the minimum protocol version that is acceptable for TLS connections. + // Protocol versions are based on the following most common TLS configurations: + // + // https://ssl-config.mozilla.org/ + // + // Note that SSLv3.0 is not a supported protocol version due to well known + // vulnerabilities such as POODLE: https://en.wikipedia.org/wiki/POODLE + // +kubebuilder:validation:Enum=VersionTLS10;VersionTLS11;VersionTLS12;VersionTLS13 + MinTLSVersion TLSProtocolVersion `json:"minTLSVersion,omitempty"` + // +listType=set + Ciphers []string `json:"ciphers,omitempty"` +} + +// MigrationConfiguration holds migration options. +// Can be overridden for specific groups of VMs through migration policies.
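+// For example, a cluster-wide setting that caps migration bandwidth (the values are +// illustrative; the field names are the json tags of this struct, nested under the +// KubeVirt resource's spec.configuration.migrations): +// +//  configuration: +//    migrations: +//      bandwidthPerMigration: 64Mi +//      parallelMigrationsPerCluster: 5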
+// Visit https://kubevirt.io/user-guide/operations/migration_policies/ for more information. +type MigrationConfiguration struct { + // NodeDrainTaintKey defines the taint key that indicates a node should be drained. + // Note: this option relies on the deprecated node taint feature. Default: kubevirt.io/drain + NodeDrainTaintKey *string `json:"nodeDrainTaintKey,omitempty"` + // ParallelOutboundMigrationsPerNode is the maximum number of concurrent outgoing live migrations + // allowed per node. Defaults to 2 + ParallelOutboundMigrationsPerNode *uint32 `json:"parallelOutboundMigrationsPerNode,omitempty"` + // ParallelMigrationsPerCluster is the total number of concurrent live migrations + // allowed cluster-wide. Defaults to 5 + ParallelMigrationsPerCluster *uint32 `json:"parallelMigrationsPerCluster,omitempty"` + // AllowAutoConverge allows the platform to compromise performance/availability of VMIs to + // guarantee successful VMI live migrations. Defaults to false + AllowAutoConverge *bool `json:"allowAutoConverge,omitempty"` + // BandwidthPerMigration limits the amount of network bandwidth live migrations are allowed to use. + // The value is in quantity per second. Defaults to 0 (no limit) + BandwidthPerMigration *resource.Quantity `json:"bandwidthPerMigration,omitempty"` + // CompletionTimeoutPerGiB is the maximum number of seconds per GiB a migration is allowed to take. + // If a live-migration takes longer to migrate than this value multiplied by the size of the VMI, + // the migration will be cancelled, unless AllowPostCopy is true. Defaults to 800 + CompletionTimeoutPerGiB *int64 `json:"completionTimeoutPerGiB,omitempty"` + // ProgressTimeout is the maximum number of seconds a live migration is allowed to make no progress. + // Hitting this timeout means a migration transferred 0 data for that many seconds. The migration is + // then considered stuck and therefore cancelled. Defaults to 150 + ProgressTimeout *int64 `json:"progressTimeout,omitempty"` + // UnsafeMigrationOverride allows live migrations to occur even if the compatibility check + // indicates the migration will be unsafe to the guest. Defaults to false + UnsafeMigrationOverride *bool `json:"unsafeMigrationOverride,omitempty"` + // AllowPostCopy enables post-copy live migrations. Such migrations allow even the busiest VMIs + // to successfully live-migrate. However, events like a network failure can cause a VMI crash. + // If set to true, migrations will still start in pre-copy, but switch to post-copy when + // CompletionTimeoutPerGiB triggers. Defaults to false + AllowPostCopy *bool `json:"allowPostCopy,omitempty"` + // When set to true, DisableTLS will disable the additional layer of live migration encryption + // provided by KubeVirt. This is usually a bad idea. Defaults to false + DisableTLS *bool `json:"disableTLS,omitempty"` + // Network is the name of the CNI network to use for live migrations. By default, migrations go + // through the pod network. + Network *string `json:"network,omitempty"` +} + +// DiskVerification holds container disks verification limits +type DiskVerification struct { + MemoryLimit *resource.Quantity `json:"memoryLimit"` +} + +// DeveloperConfiguration holds developer options +type DeveloperConfiguration struct { + // FeatureGates is the list of experimental features to enable.
Defaults to none + FeatureGates []string `json:"featureGates,omitempty"` + // LessPVCSpaceToleration determines how much smaller, in percentage, disk PVCs are + // allowed to be compared to the requested size (to account for various overheads). + // Defaults to 10 + LessPVCSpaceToleration int `json:"pvcTolerateLessSpaceUpToPercent,omitempty"` + // MinimumReservePVCBytes is the amount of space, in bytes, to leave unused on disks. + // Defaults to 131072 (128KiB) + MinimumReservePVCBytes uint64 `json:"minimumReservePVCBytes,omitempty"` + // MemoryOvercommit is the percentage of memory we want to give VMIs compared to the amount + // given to its parent pod (virt-launcher). For example, a value of 102 means the VMI will + // "see" 2% more memory than its parent pod. Values under 100 are effectively "undercommits". + // Overcommits can lead to memory exhaustion, which in turn can lead to crashes. Use carefully. + // Defaults to 100 + MemoryOvercommit int `json:"memoryOvercommit,omitempty"` + // NodeSelectors allows restricting VMI creation to nodes that match a set of labels. + // Defaults to none + NodeSelectors map[string]string `json:"nodeSelectors,omitempty"` + // UseEmulation can be set to true to allow fallback to software emulation + // in case hardware-assisted emulation is not available. Defaults to false + UseEmulation bool `json:"useEmulation,omitempty"` + // For each requested virtual CPU, CPUAllocationRatio defines how much physical CPU to request per VMI + // from the hosting node. The value is in fraction of a CPU thread (or core on non-hyperthreaded nodes). + // For example, a value of 1 means 1 physical CPU thread per VMI CPU thread. + // A value of 100 would be 1% of a physical thread allocated for each requested VMI thread. + // This option has no effect on VMIs that request dedicated CPUs. More information at: + // https://kubevirt.io/user-guide/operations/node_overcommit/#node-cpu-allocation-ratio + // Defaults to 10 + CPUAllocationRatio int `json:"cpuAllocationRatio,omitempty"` + // Allow overriding the automatically determined minimum TSC frequency of the cluster + // and fixate the minimum to this frequency. + MinimumClusterTSCFrequency *int64 `json:"minimumClusterTSCFrequency,omitempty"` + DiskVerification *DiskVerification `json:"diskVerification,omitempty"` + LogVerbosity *LogVerbosity `json:"logVerbosity,omitempty"` +} + +// LogVerbosity sets log verbosity level of various components +type LogVerbosity struct { + VirtAPI uint `json:"virtAPI,omitempty"` + VirtController uint `json:"virtController,omitempty"` + VirtHandler uint `json:"virtHandler,omitempty"` + VirtLauncher uint `json:"virtLauncher,omitempty"` + VirtOperator uint `json:"virtOperator,omitempty"` + // NodeVerbosity represents a map of nodes with a specific verbosity level + NodeVerbosity map[string]uint `json:"nodeVerbosity,omitempty"` +} + +const ( + PCIResourcePrefix = "PCI_RESOURCE" + MDevResourcePrefix = "MDEV_PCI_RESOURCE" +) + +// PermittedHostDevices holds information about devices allowed for passthrough +type PermittedHostDevices struct { + // +listType=atomic + PciHostDevices []PciHostDevice `json:"pciHostDevices,omitempty"` + // +listType=atomic + MediatedDevices []MediatedHostDevice `json:"mediatedDevices,omitempty"` +} + +// PciHostDevice represents a host PCI device allowed for passthrough +type PciHostDevice struct { + // The vendor_id:product_id tuple of the PCI device + PCIVendorSelector string `json:"pciVendorSelector"` + // The name of the resource that is representing the device. 
Exposed by + // a device plugin and requested by VMs. Typically of the form + // vendor.com/product_name + ResourceName string `json:"resourceName"` + // If true, KubeVirt will leave the allocation and monitoring to an + // external device plugin + ExternalResourceProvider bool `json:"externalResourceProvider,omitempty"` +} + +// MediatedHostDevice represents a host mediated device allowed for passthrough +type MediatedHostDevice struct { + MDEVNameSelector string `json:"mdevNameSelector"` + ResourceName string `json:"resourceName"` + ExternalResourceProvider bool `json:"externalResourceProvider,omitempty"` +} + +// MediatedDevicesConfiguration holds information about MDEV types to be defined, if available +type MediatedDevicesConfiguration struct { + // Deprecated. Use mediatedDeviceTypes instead. + // +optional + // +listType=atomic + MediatedDevicesTypes []string `json:"mediatedDevicesTypes,omitempty"` + // +optional + // +listType=atomic + MediatedDeviceTypes []string `json:"mediatedDeviceTypes,omitempty"` + // +optional + // +listType=atomic + NodeMediatedDeviceTypes []NodeMediatedDeviceTypesConfig `json:"nodeMediatedDeviceTypes,omitempty"` +} + +// NodeMediatedDeviceTypesConfig holds information about MDEV types to be defined in a specific node that matches the NodeSelector field. +// +k8s:openapi-gen=true +type NodeMediatedDeviceTypesConfig struct { + // NodeSelector is a selector which must be true for the vmi to fit on a node. + // Selector which must match a node's labels for the vmi to be scheduled on that node. + // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + NodeSelector map[string]string `json:"nodeSelector"` + // Deprecated. Use mediatedDeviceTypes instead. + // +optional + // +listType=atomic + MediatedDevicesTypes []string `json:"mediatedDevicesTypes,omitempty"` + // +optional + // +listType=atomic + MediatedDeviceTypes []string `json:"mediatedDeviceTypes"` +} + +// NetworkConfiguration holds network options +type NetworkConfiguration struct { + NetworkInterface string `json:"defaultNetworkInterface,omitempty"` + PermitSlirpInterface *bool `json:"permitSlirpInterface,omitempty"` + PermitBridgeInterfaceOnPodNetwork *bool `json:"permitBridgeInterfaceOnPodNetwork,omitempty"` +} + +// GuestAgentPing configures the guest-agent based ping probe +type GuestAgentPing struct { +} + +type ProfilerResult struct { + PprofData map[string][]byte `json:"pprofData,omitempty"` +} + +type ClusterProfilerResults struct { + ComponentResults map[string]ProfilerResult `json:"componentResults"` + Continue string `json:"continue,omitempty"` +} + +type ClusterProfilerRequest struct { + LabelSelector string `json:"labelSelector,omitempty"` + Continue string `json:"continue,omitempty"` + PageSize int64 `json:"pageSize"` +} + +// InstancetypeMatcher references an instancetype that is used to fill fields in the VMI template. +type InstancetypeMatcher struct { + // Name is the name of the VirtualMachineInstancetype or VirtualMachineClusterInstancetype + // + // +optional + Name string `json:"name,omitempty"` + + // Kind specifies which instancetype resource is referenced. + // Allowed values are: "VirtualMachineInstancetype" and "VirtualMachineClusterInstancetype". + // If not specified, "VirtualMachineClusterInstancetype" is used by default.
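+ // For example (the instancetype name "small" is illustrative): + // + //  instancetype: + //    name: small + //    kind: VirtualMachineClusterInstancetype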
+ // + // +optional + Kind string `json:"kind,omitempty"` + + // RevisionName specifies a ControllerRevision containing a specific copy of the + // VirtualMachineInstancetype or VirtualMachineClusterInstancetype to be used. This is initially + // captured the first time the instancetype is applied to the VirtualMachineInstance. + // + // +optional + RevisionName string `json:"revisionName,omitempty"` + + // InferFromVolume lists the name of a volume that should be used to infer or discover the instancetype + // to be used through known annotations on the underlying resource. Once applied to the InstancetypeMatcher + // this field is removed. + // + // +optional + InferFromVolume string `json:"inferFromVolume,omitempty"` +} + +// PreferenceMatcher references a set of preferences that are used to fill fields in the VMI template. +type PreferenceMatcher struct { + // Name is the name of the VirtualMachinePreference or VirtualMachineClusterPreference + // + // +optional + Name string `json:"name,omitempty"` + + // Kind specifies which preference resource is referenced. + // Allowed values are: "VirtualMachinePreference" and "VirtualMachineClusterPreference". + // If not specified, "VirtualMachineClusterPreference" is used by default. + // + // +optional + Kind string `json:"kind,omitempty"` + + // RevisionName specifies a ControllerRevision containing a specific copy of the + // VirtualMachinePreference or VirtualMachineClusterPreference to be used. This is + // initially captured the first time the preference is applied to the VirtualMachineInstance. + // + // +optional + RevisionName string `json:"revisionName,omitempty"` + + // InferFromVolume lists the name of a volume that should be used to infer or discover the preference + // to be used through known annotations on the underlying resource. Once applied to the PreferenceMatcher + // this field is removed. + // + // +optional + InferFromVolume string `json:"inferFromVolume,omitempty"` +} diff --git a/vendor/kubevirt.io/api/core/v1/types_swagger_generated.go b/vendor/kubevirt.io/api/core/v1/types_swagger_generated.go new file mode 100644 index 000000000..bd5debce6 --- /dev/null +++ b/vendor/kubevirt.io/api/core/v1/types_swagger_generated.go @@ -0,0 +1,848 @@ +// Code generated by swagger-doc. DO NOT EDIT. + +package v1 + +func (VirtualMachineInstance) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VirtualMachineInstance is *the* VirtualMachineInstance Definition. It represents a virtual machine in the runtime environment of kubernetes.\n\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+genclient", + "spec": "VirtualMachineInstance Spec contains the VirtualMachineInstance specification.", + "status": "Status is the high level overview of how the VirtualMachineInstance is doing.
It contains information available to controllers and users.", + } +} + +func (VirtualMachineInstanceList) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VirtualMachineInstanceList is a list of VirtualMachines\n\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object", + } +} + +func (VirtualMachineInstanceSpec) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VirtualMachineInstanceSpec is a description of a VirtualMachineInstance.", + "priorityClassName": "If specified, indicates the pod's priority.\nIf not specified, the pod priority will be default or zero if there is no\ndefault.\n+optional", + "domain": "Specification of the desired behavior of the VirtualMachineInstance on the host.", + "nodeSelector": "NodeSelector is a selector which must be true for the vmi to fit on a node.\nSelector which must match a node's labels for the vmi to be scheduled on that node.\nMore info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\n+optional", + "affinity": "If affinity is specified, obey all the affinity rules", + "schedulerName": "If specified, the VMI will be dispatched by specified scheduler.\nIf not specified, the VMI will be dispatched by default scheduler.\n+optional", + "tolerations": "If toleration is specified, obey all the toleration rules.", + "topologySpreadConstraints": "TopologySpreadConstraints describes how a group of VMIs will be spread across a given topology\ndomains. K8s scheduler will schedule VMI pods in a way which abides by the constraints.\n+optional\n+patchMergeKey=topologyKey\n+patchStrategy=merge\n+listType=map\n+listMapKey=topologyKey\n+listMapKey=whenUnsatisfiable", + "evictionStrategy": "EvictionStrategy can be set to \"LiveMigrate\" if the VirtualMachineInstance should be\nmigrated instead of shut-off in case of a node drain.\n\n+optional", + "startStrategy": "StartStrategy can be set to \"Paused\" if Virtual Machine should be started in paused state.\n\n+optional", + "terminationGracePeriodSeconds": "Grace period observed after signalling a VirtualMachineInstance to stop after which the VirtualMachineInstance is force terminated.", + "volumes": "List of volumes that can be mounted by disks belonging to the vmi.", + "livenessProbe": "Periodic probe of VirtualMachineInstance liveness.\nVirtualmachineInstances will be stopped if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n+optional", + "readinessProbe": "Periodic probe of VirtualMachineInstance service readiness.\nVirtualmachineInstances will be removed from service endpoints if the probe fails.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n+optional", + "hostname": "Specifies the hostname of the vmi\nIf not specified, the hostname will be set to the name of the vmi, if dhcp or cloud-init is configured properly.\n+optional", + "subdomain": "If specified, the fully qualified vmi hostname will be \"...svc.\".\nIf not specified, the vmi will not have a domainname at all.
The DNS entry will resolve to the vmi,\nno matter if the vmi itself can pick up a hostname.\n+optional", + "networks": "List of networks that can be attached to a vm's virtual interface.", + "dnsPolicy": "Set DNS policy for the pod.\nDefaults to \"ClusterFirst\".\nValid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.\nDNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.\nTo have DNS options set along with hostNetwork, you have to specify DNS policy\nexplicitly to 'ClusterFirstWithHostNet'.\n+optional", + "dnsConfig": "Specifies the DNS parameters of a pod.\nParameters specified here will be merged to the generated DNS\nconfiguration based on DNSPolicy.\n+optional", + "accessCredentials": "Specifies a set of public keys to inject into the vm guest\n+listType=atomic\n+optional", + } +} + +func (VirtualMachineInstancePhaseTransitionTimestamp) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VirtualMachineInstancePhaseTransitionTimestamp gives a timestamp in relation to when a phase is set on a vmi", + "phase": "Phase is the status of the VirtualMachineInstance in kubernetes world. It is not the VirtualMachineInstance status, but partially correlates to it.", + "phaseTransitionTimestamp": "PhaseTransitionTimestamp is the timestamp of when the phase change occurred", + } +} + +func (TopologyHints) SwaggerDoc() map[string]string { + return map[string]string{} +} + +func (VirtualMachineInstanceStatus) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VirtualMachineInstanceStatus represents information about the status of a VirtualMachineInstance. Status may trail the actual\nstate of a system.", + "nodeName": "NodeName is the name where the VirtualMachineInstance is currently running.", + "reason": "A brief CamelCase message indicating details about why the VMI is in this state. e.g. 'NodeUnresponsive'\n+optional", + "conditions": "Conditions are specific points in VirtualMachineInstance's pod runtime.", + "phase": "Phase is the status of the VirtualMachineInstance in kubernetes world. It is not the VirtualMachineInstance status, but partially correlates to it.", + "phaseTransitionTimestamps": "PhaseTransitionTimestamp is the timestamp of when the last phase change occurred\n+listType=atomic\n+optional", + "interfaces": "Interfaces represent the details of available network interfaces.", + "guestOSInfo": "Guest OS Information", + "migrationState": "Represents the status of a live migration", + "migrationMethod": "Represents the method using which the vmi can be migrated: live migration or block migration", + "migrationTransport": "This represents the migration transport", + "qosClass": "The Quality of Service (QOS) classification assigned to the virtual machine instance based on resource requirements\nSee PodQOSClass type for available QOS classes\nMore info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md\n+optional", + "launcherContainerImageVersion": "LauncherContainerImageVersion indicates what container image is currently active for the vmi.", + "evacuationNodeName": "EvacuationNodeName is used to track the eviction process of a VMI. It stores the name of the node that we want\nto evacuate. 
It is meant to be used by KubeVirt core components only and can't be set or modified by users.\n+optional", + "activePods": "ActivePods is a mapping of pod UID to node name.\nIt is possible for multiple pods to be running for a single VMI during migration.", + "volumeStatus": "VolumeStatus contains the statuses of all the volumes\n+optional\n+listType=atomic", + "fsFreezeStatus": "FSFreezeStatus is the state of the fs of the guest\nit can be either frozen or thawed\n+optional", + "topologyHints": "+optional", + "virtualMachineRevisionName": "VirtualMachineRevisionName is used to get the vm revision of the vmi when doing\nan online vm snapshot\n+optional", + "runtimeUser": "RuntimeUser is used to determine what user will be used in launcher\n+optional", + "VSOCKCID": "VSOCKCID is used to track the allocated VSOCK CID in the VM.\n+optional", + "selinuxContext": "SELinuxContext is the actual SELinux context of the virt-launcher pod\n+optional", + } +} + +func (PersistentVolumeClaimInfo) SwaggerDoc() map[string]string { + return map[string]string{ + "": "PersistentVolumeClaimInfo contains the relevant information virt-handler needs cached about a PVC", + "accessModes": "AccessModes contains the desired access modes the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1\n+listType=atomic\n+optional", + "volumeMode": "VolumeMode defines what type of volume is required by the claim.\nValue of Filesystem is implied when not included in claim spec.\n+optional", + "capacity": "Capacity represents the capacity set on the corresponding PVC status\n+optional", + "requests": "Requests represents the resources requested by the corresponding PVC spec\n+optional", + "preallocated": "Preallocated indicates if the PVC's storage is preallocated or not\n+optional", + "filesystemOverhead": "Percentage of filesystem's size to be reserved when resizing the PVC\n+optional", + } +} + +func (VolumeStatus) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VolumeStatus represents information about the status of volumes attached to the VirtualMachineInstance.", + "name": "Name is the name of the volume", + "target": "Target is the target name used when adding the volume to the VM, e.g. vda", + "phase": "Phase is the current phase of the volume", + "reason": "Reason is a brief description of why we are in the current hotplug volume phase", + "message": "Message is a detailed message about the current hotplug volume phase", + "persistentVolumeClaimInfo": "PersistentVolumeClaimInfo is information about the PVC that handler requires during start flow", + "hotplugVolume": "If the volume is hotplug, this will contain the hotplug status.", + "size": "Represents the size of the volume", + "memoryDumpVolume": "If the volume is memorydump volume, this will contain the memorydump info.", + } +} + +func (DomainMemoryDumpInfo) SwaggerDoc() map[string]string { + return map[string]string{ + "": "DomainMemoryDumpInfo represents the memory dump information", + "startTimestamp": "StartTimestamp is the time when the memory dump started", + "endTimestamp": "EndTimestamp is the time when the memory dump completed", + "claimName": "ClaimName is the name of the pvc the memory was dumped to", + "targetFileName": "TargetFileName is the name of the memory dump output", + } +} + +func (HotplugVolumeStatus) SwaggerDoc() map[string]string { + return map[string]string{ + "": "HotplugVolumeStatus represents the hotplug status of the volume", + "attachPodName": "AttachPodName is the name of the
pod used to attach the volume to the node.", + "attachPodUID": "AttachPodUID is the UID of the pod used to attach the volume to the node.", + } +} + +func (VirtualMachineInstanceCondition) SwaggerDoc() map[string]string { + return map[string]string{ + "lastProbeTime": "+nullable", + "lastTransitionTime": "+nullable", + } +} + +func (VirtualMachineInstanceMigrationCondition) SwaggerDoc() map[string]string { + return map[string]string{ + "lastProbeTime": "+nullable", + "lastTransitionTime": "+nullable", + } +} + +func (VirtualMachineInstanceNetworkInterface) SwaggerDoc() map[string]string { + return map[string]string{ + "ipAddress": "IP address of a Virtual Machine interface. It is always the first item of\nIPs", + "mac": "Hardware address of a Virtual Machine interface", + "name": "Name of the interface, corresponds to name of the network assigned to the interface", + "ipAddresses": "List of all IP addresses of a Virtual Machine interface", + "interfaceName": "The interface name inside the Virtual Machine", + "infoSource": "Specifies the origin of the interface data collected. values: domain, guest-agent, or both", + "queueCount": "Specifies how many queues are allocated by MultiQueue", + } +} + +func (VirtualMachineInstanceGuestOSInfo) SwaggerDoc() map[string]string { + return map[string]string{ + "name": "Name of the Guest OS", + "kernelRelease": "Guest OS Kernel Release", + "version": "Guest OS Version", + "prettyName": "Guest OS Pretty Name", + "versionId": "Version ID of the Guest OS", + "kernelVersion": "Kernel version of the Guest OS", + "machine": "Machine type of the Guest OS", + "id": "Guest OS Id", + } +} + +func (VirtualMachineInstanceMigrationState) SwaggerDoc() map[string]string { + return map[string]string{ + "": "+k8s:openapi-gen=true", + "startTimestamp": "The time the migration action began\n+nullable", + "endTimestamp": "The time the migration action ended\n+nullable", + "targetNodeDomainDetected": "The Target Node has seen the Domain Start Event", + "targetNodeAddress": "The address of the target node to use for the migration", + "targetDirectMigrationNodePorts": "The list of ports opened for live migration on the destination node", + "targetNode": "The target node that the VMI is moving to", + "targetPod": "The target pod that the VMI is moving to", + "targetAttachmentPodUID": "The UID of the target attachment pod for hotplug volumes", + "sourceNode": "The source node that the VMI originated on", + "completed": "Indicates the migration completed", + "failed": "Indicates that the migration failed", + "abortRequested": "Indicates that the migration has been requested to abort", + "abortStatus": "Indicates the final status of the live migration abortion", + "migrationUid": "The VirtualMachineInstanceMigration object associated with this migration", + "mode": "Lets us know if the vmi is currently running pre or post copy migration", + "migrationPolicyName": "Name of the migration policy. 
If string is empty, no policy is matched", + "migrationConfiguration": "Migration configurations to apply", + "targetCPUSet": "If the VMI requires dedicated CPUs, this field will\nhold the dedicated CPU set on the target node\n+listType=atomic", + "targetNodeTopology": "If the VMI requires dedicated CPUs, this field will\nhold the numa topology on the target node", + } +} + +func (VMISelector) SwaggerDoc() map[string]string { + return map[string]string{ + "name": "Name of the VirtualMachineInstance to migrate", + } +} + +func (VirtualMachineInstanceReplicaSet) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VirtualMachineInstance is *the* VirtualMachineInstance Definition. It represents a virtual machine in the runtime environment of kubernetes.\n\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+genclient", + "spec": "VirtualMachineInstance Spec contains the VirtualMachineInstance specification.", + "status": "Status is the high level overview of how the VirtualMachineInstance is doing. It contains information available to controllers and users.\n+nullable", + } +} + +func (VirtualMachineInstanceReplicaSetList) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VMIList is a list of VMIs\n\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object", + } +} + +func (VirtualMachineInstanceReplicaSetSpec) SwaggerDoc() map[string]string { + return map[string]string{ + "replicas": "Number of desired pods. This is a pointer to distinguish between explicit\nzero and not specified. Defaults to 1.\n+optional", + "selector": "Label selector for pods. Existing ReplicaSets whose pods are\nselected by this will be the ones affected by this deployment.", + "template": "Template describes the pods that will be created.", + "paused": "Indicates that the replica set is paused.\n+optional", + } +} + +func (VirtualMachineInstanceReplicaSetStatus) SwaggerDoc() map[string]string { + return map[string]string{ + "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).\n+optional", + "readyReplicas": "The number of ready replicas for this replica set.\n+optional", + "labelSelector": "Canonical form of the label selector for HPA which consumes it through the scale subresource.", + } +} + +func (VirtualMachineInstanceReplicaSetCondition) SwaggerDoc() map[string]string { + return map[string]string{ + "lastProbeTime": "+nullable", + "lastTransitionTime": "+nullable", + } +} + +func (DataVolumeTemplateDummyStatus) SwaggerDoc() map[string]string { + return map[string]string{} +} + +func (DataVolumeTemplateSpec) SwaggerDoc() map[string]string { + return map[string]string{ + "spec": "DataVolumeSpec contains the DataVolume specification.", + "status": "DataVolumeTemplateDummyStatus is here simply for backwards compatibility with\na previous API.\n+nullable\n+optional", + } +} + +func (VirtualMachineInstanceTemplateSpec) SwaggerDoc() map[string]string { + return map[string]string{ + "metadata": "+kubebuilder:pruning:PreserveUnknownFields\n+nullable", + "spec": "VirtualMachineInstance Spec contains the VirtualMachineInstance specification.", + } +} + +func (VirtualMachineInstanceMigration) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VirtualMachineInstanceMigration represents the object tracking a VMI's migration\nto another host in the cluster\n\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+genclient", + } +} + +func (VirtualMachineInstanceMigrationList) 
SwaggerDoc() map[string]string { + return map[string]string{ + "": "VirtualMachineInstanceMigrationList is a list of VirtualMachineMigrations\n\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object", + } +} + +func (VirtualMachineInstanceMigrationSpec) SwaggerDoc() map[string]string { + return map[string]string{ + "vmiName": "The name of the VMI to perform the migration on. VMI must exist in the migration object's namespace", + } +} + +func (VirtualMachineInstanceMigrationPhaseTransitionTimestamp) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VirtualMachineInstanceMigrationPhaseTransitionTimestamp gives a timestamp in relation to when a phase is set on a vmi", + "phase": "Phase is the status of the VirtualMachineInstanceMigrationPhase in kubernetes world. It is not the VirtualMachineInstanceMigrationPhase status, but partially correlates to it.", + "phaseTransitionTimestamp": "PhaseTransitionTimestamp is the timestamp of when the phase change occurred", + } +} + +func (VirtualMachineInstanceMigrationStatus) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VirtualMachineInstanceMigration represents information pertaining to a VMI's migration.", + "phaseTransitionTimestamps": "PhaseTransitionTimestamp is the timestamp of when the last phase change occurred\n+listType=atomic\n+optional", + "migrationState": "Represents the status of a live migration", + } +} + +func (VirtualMachineInstancePreset) SwaggerDoc() map[string]string { + return map[string]string{ + "": "Deprecated for removal in v2, please use VirtualMachineInstanceType and VirtualMachinePreference instead.\n\nVirtualMachineInstancePreset defines a VMI spec.domain to be applied to all VMIs that match the provided label selector\nMore info: https://kubevirt.io/user-guide/virtual_machines/presets/#overrides\n\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+genclient", + "spec": "VirtualMachineInstance Spec contains the VirtualMachineInstance specification.", + } +} + +func (VirtualMachineInstancePresetList) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VirtualMachineInstancePresetList is a list of VirtualMachinePresets\n\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object", + } +} + +func (VirtualMachineInstancePresetSpec) SwaggerDoc() map[string]string { + return map[string]string{ + "selector": "Selector is a label query over a set of VMIs.\nRequired.", + "domain": "Domain is the same object type as contained in VirtualMachineInstanceSpec", + } +} + +func (VirtualMachine) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VirtualMachine handles the VirtualMachines that are not running\nor are in a stopped state\nThe VirtualMachine contains the template to create the\nVirtualMachineInstance.
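The migration API documented above is driven by a single spec field, vmiName. A minimal sketch of constructing a migration for an existing VMI, assuming the Go field VMIName maps to the vmiName key shown above and metav1 is k8s.io/apimachinery/pkg/apis/meta/v1:

	migration := &v1.VirtualMachineInstanceMigration{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "example-migration-", // placeholder name
			Namespace:    "default",            // must match the VMI's namespace
		},
		Spec: v1.VirtualMachineInstanceMigrationSpec{
			VMIName: "example-vmi", // the VMI to migrate
		},
	}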
It also mirrors the running state of the created\nVirtualMachineInstance in its status.\n\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+genclient", + "spec": "Spec contains the specification of VirtualMachineInstance created", + "status": "Status holds the current state of the controller and brief information\nabout its associated VirtualMachineInstance", + } +} + +func (VirtualMachineList) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VirtualMachineList is a list of virtualmachines\n\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object", + } +} + +func (VirtualMachineSpec) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VirtualMachineSpec describes how the proper VirtualMachine\nshould look", + "running": "Running controls whether the associated VirtualMachineInstance is created or not\nMutually exclusive with RunStrategy", + "runStrategy": "Running state indicates the requested running state of the VirtualMachineInstance\nmutually exclusive with Running", + "instancetype": "InstancetypeMatcher references an instancetype that is used to fill fields in Template", + "preference": "PreferenceMatcher references a set of preferences that is used to fill fields in Template", + "template": "Template is the direct specification of VirtualMachineInstance", + "dataVolumeTemplates": "dataVolumeTemplates is a list of dataVolumes that the VirtualMachineInstance template can reference.\nDataVolumes in this list are dynamically created for the VirtualMachine and are tied to the VirtualMachine's life-cycle.", + } +} + +func (VirtualMachineStartFailure) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VirtualMachineStartFailure tracks VMIs which failed to transition successfully\nto running using the VM status", + } +} + +func (VirtualMachineStatus) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VirtualMachineStatus represents the status returned by the\ncontroller to describe how the VirtualMachine is doing", + "snapshotInProgress": "SnapshotInProgress is the name of the VirtualMachineSnapshot currently executing", + "restoreInProgress": "RestoreInProgress is the name of the VirtualMachineRestore currently executing", + "created": "Created indicates if the virtual machine is created in the cluster", + "ready": "Ready indicates if the virtual machine is running and ready", + "printableStatus": "PrintableStatus is a human readable, high-level representation of the status of the virtual machine", + "conditions": "Hold the state information of the VirtualMachine and its VirtualMachineInstance", + "stateChangeRequests": "StateChangeRequests indicates a list of actions that should be taken on a VMI\ne.g.
stop a specific VMI then start a new one.", + "volumeRequests": "VolumeRequests indicates a list of volumes to add to or remove from the VMI template and\nhotplug on an active running VMI.\n+listType=atomic", + "volumeSnapshotStatuses": "VolumeSnapshotStatuses indicates a list of statuses whether snapshotting is\nsupported by each volume.", + "startFailure": "StartFailure tracks consecutive VMI startup failures for the purposes of\ncrash loop backoffs\n+nullable\n+optional", + "memoryDumpRequest": "MemoryDumpRequest tracks memory dump request phase and info of getting a memory\ndump to the given pvc\n+nullable\n+optional", + } +} + +func (VolumeSnapshotStatus) SwaggerDoc() map[string]string { + return map[string]string{ + "name": "Volume name", + "enabled": "True if the volume supports snapshotting", + "reason": "Empty if snapshotting is enabled, contains reason otherwise", + } +} + +func (VirtualMachineVolumeRequest) SwaggerDoc() map[string]string { + return map[string]string{ + "addVolumeOptions": "AddVolumeOptions when set indicates a volume should be added. The details\nwithin this field specify how to add the volume", + "removeVolumeOptions": "RemoveVolumeOptions when set indicates a volume should be removed. The details\nwithin this field specify how to remove the volume", + } +} + +func (VirtualMachineStateChangeRequest) SwaggerDoc() map[string]string { + return map[string]string{ + "action": "Indicates the type of action that is requested. e.g. Start or Stop", + "data": "Provides additional data in order to perform the Action", + "uid": "Indicates the UUID of an existing Virtual Machine Instance that this change request applies to -- if applicable", + } +} + +func (VirtualMachineCondition) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VirtualMachineCondition represents the state of VirtualMachine", + "lastProbeTime": "+nullable", + "lastTransitionTime": "+nullable", + } +} + +func (Handler) SwaggerDoc() map[string]string { + return map[string]string{ + "": "Handler defines a specific action that should be taken", + "exec": "One and only one of the following should be specified.\nExec specifies the action to take, it will be executed on the guest through the qemu-guest-agent.\nIf the guest agent is not available, this probe will fail.\n+optional", + "guestAgentPing": "GuestAgentPing contacts the qemu-guest-agent for availability checks.\n+optional", + "httpGet": "HTTPGet specifies the http request to perform.\n+optional", + "tcpSocket": "TCPSocket specifies an action involving a TCP port.\nTCP hooks not yet supported\n+optional", + } +} + +func (Probe) SwaggerDoc() map[string]string { + return map[string]string{ + "": "Probe describes a health check to be performed against a VirtualMachineInstance to determine whether it is\nalive or ready to receive traffic.", + "initialDelaySeconds": "Number of seconds after the VirtualMachineInstance has started before liveness probes are initiated.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n+optional", + "timeoutSeconds": "Number of seconds after which the probe times out.\nFor exec probes the timeout fails the probe but does not terminate the command running on the guest.\nThis means a blocking command can result in an increasing load on the guest.\nA small buffer will be added to the resulting workload exec probe to compensate for delays\ncaused by the qemu guest exec mechanism.\nDefaults to 1 second.
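Taken together, the Probe docs describe Kubernetes-style defaulting: unset numeric fields fall back to the documented values (timeout 1s, period 10s, success threshold 1, failure threshold 3). A hedged sketch of a readiness probe relying on those defaults, assuming k8sv1 is k8s.io/api/core/v1 and intstr is k8s.io/apimachinery/pkg/util/intstr; the endpoint and port are placeholders:

	probe := &v1.Probe{
		Handler: v1.Handler{
			HTTPGet: &k8sv1.HTTPGetAction{
				Path: "/readyz",
				Port: intstr.FromInt(8080),
			},
		},
		InitialDelaySeconds: 5,
		// TimeoutSeconds, PeriodSeconds, SuccessThreshold and FailureThreshold are
		// left at zero and take the documented defaults (1, 10, 1, 3).
	}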
Minimum value is 1.\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes\n+optional", + "periodSeconds": "How often (in seconds) to perform the probe.\nDefault to 10 seconds. Minimum value is 1.\n+optional", + "successThreshold": "Minimum consecutive successes for the probe to be considered successful after having failed.\nDefaults to 1. Must be 1 for liveness. Minimum value is 1.\n+optional", + "failureThreshold": "Minimum consecutive failures for the probe to be considered failed after having succeeded.\nDefaults to 3. Minimum value is 1.\n+optional", + } +} + +func (KubeVirt) SwaggerDoc() map[string]string { + return map[string]string{ + "": "KubeVirt represents the object deploying all KubeVirt resources\n\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+genclient", + } +} + +func (KubeVirtList) SwaggerDoc() map[string]string { + return map[string]string{ + "": "KubeVirtList is a list of KubeVirts\n\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object", + } +} + +func (KubeVirtSelfSignConfiguration) SwaggerDoc() map[string]string { + return map[string]string{ + "caRotateInterval": "Deprecated. Use CA.Duration instead", + "certRotateInterval": "Deprecated. Use Server.Duration instead", + "caOverlapInterval": "Deprecated. Use CA.Duration and CA.RenewBefore instead", + "ca": "CA configuration\nCA certs are kept in the CA bundle as long as they are valid", + "server": "Server configuration\nCerts are rotated and discarded", + } +} + +func (CertConfig) SwaggerDoc() map[string]string { + return map[string]string{ + "": "CertConfig contains the tunables for TLS certificates", + "duration": "The requested 'duration' (i.e. lifetime) of the Certificate.", + "renewBefore": "The amount of time before the currently issued certificate's \"notAfter\"\ntime that we will begin to attempt to renew the certificate.", + } +} + +func (KubeVirtCertificateRotateStrategy) SwaggerDoc() map[string]string { + return map[string]string{} +} + +func (KubeVirtWorkloadUpdateStrategy) SwaggerDoc() map[string]string { + return map[string]string{ + "": "KubeVirtWorkloadUpdateStrategy defines options related to updating a KubeVirt install", + "workloadUpdateMethods": "WorkloadUpdateMethods defines the methods that can be used to disrupt workloads\nduring automated workload updates.\nWhen multiple methods are present, the least disruptive method takes\nprecedence over more disruptive methods. 
For example if both LiveMigrate and Shutdown\nmethods are listed, only VMs which are not live migratable will be restarted/shutdown\n\nAn empty list defaults to no automated workload updating\n\n+listType=atomic\n+optional", + "batchEvictionSize": "BatchEvictionSize Represents the number of VMIs that can be forced updated per\nthe BatchShutdownInterval interval\n\nDefaults to 10\n\n+optional", + "batchEvictionInterval": "BatchEvictionInterval Represents the interval to wait before issuing the next\nbatch of shutdowns\n\nDefaults to 1 minute\n\n+optional", + } +} + +func (KubeVirtSpec) SwaggerDoc() map[string]string { + return map[string]string{ + "imageTag": "The image tag to use for the container images installed.\nDefaults to the same tag as the operator's container image.", + "imageRegistry": "The image registry to pull the container images from\nDefaults to the same registry the operator's container image is pulled from.", + "imagePullPolicy": "The ImagePullPolicy to use.", + "imagePullSecrets": "The imagePullSecrets to pull the container images from\nDefaults to none\n+listType=atomic", + "monitorNamespace": "The namespace Prometheus is deployed in\nDefaults to openshift-monitor", + "serviceMonitorNamespace": "The namespace the service monitor will be deployed\n When ServiceMonitorNamespace is set, then we'll install the service monitor object in that namespace\notherwise we will use the monitoring namespace.", + "monitorAccount": "The name of the Prometheus service account that needs read-access to KubeVirt endpoints\nDefaults to prometheus-k8s", + "workloadUpdateStrategy": "WorkloadUpdateStrategy defines at the cluster level how to handle\nautomated workload updates", + "uninstallStrategy": "Specifies if kubevirt can be deleted if workloads are still present.\nThis is mainly a precaution to avoid accidental data loss", + "productVersion": "Designate the apps.kubevirt.io/version label for KubeVirt components.\nUseful if KubeVirt is included as part of a product.\nIf ProductVersion is not specified, KubeVirt's version will be used.", + "productName": "Designate the apps.kubevirt.io/part-of label for KubeVirt components.\nUseful if KubeVirt is included as part of a product.\nIf ProductName is not specified, the part-of label will be omitted.", + "productComponent": "Designate the apps.kubevirt.io/component label for KubeVirt components.\nUseful if KubeVirt is included as part of a product.\nIf ProductComponent is not specified, the component label default value is kubevirt.", + "configuration": "holds kubevirt configurations.\nsame as the virt-configMap", + "infra": "selectors and tolerations that should apply to KubeVirt infrastructure components\n+optional", + "workloads": "selectors and tolerations that should apply to KubeVirt workloads\n+optional", + } +} + +func (CustomizeComponents) SwaggerDoc() map[string]string { + return map[string]string{ + "patches": "+listType=atomic", + "flags": "Configure the value used for deployment and daemonset resources", + } +} + +func (Flags) SwaggerDoc() map[string]string { + return map[string]string{ + "": "Flags will create a patch that will replace all flags for the container's\ncommand field. The only flags that will be used are those defined. There are no\nguarantees around forward/backward compatibility.
If set incorrectly this will\ncause the resource, when rolled out, to error until flags are updated.", + } +} + +func (CustomizeComponentsPatch) SwaggerDoc() map[string]string { + return map[string]string{ + "resourceName": "+kubebuilder:validation:MinLength=1", + "resourceType": "+kubebuilder:validation:MinLength=1", + } +} + +func (GenerationStatus) SwaggerDoc() map[string]string { + return map[string]string{ + "": "GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made.", + "group": "group is the group of the thing you're tracking", + "resource": "resource is the resource type of the thing you're tracking", + "namespace": "namespace is where the thing you're tracking is\n+optional", + "name": "name is the name of the thing you're tracking", + "lastGeneration": "lastGeneration is the last generation of the workload controller involved", + "hash": "hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps\n+optional", + } +} + +func (KubeVirtStatus) SwaggerDoc() map[string]string { + return map[string]string{ + "": "KubeVirtStatus represents information pertaining to a KubeVirt deployment.", + "generations": "+listType=atomic", + } +} + +func (KubeVirtCondition) SwaggerDoc() map[string]string { + return map[string]string{ + "": "KubeVirtCondition represents a condition of a KubeVirt deployment", + "lastProbeTime": "+optional\n+nullable", + "lastTransitionTime": "+optional\n+nullable", + } +} + +func (RestartOptions) SwaggerDoc() map[string]string { + return map[string]string{ + "": "RestartOptions may be provided when deleting an API object.", + "gracePeriodSeconds": "The duration in seconds before the object should be force-restarted. Value must be a non-negative integer.\nThe value zero indicates restart immediately. If this value is nil, the default grace period for deletion of the corresponding VMI for the\nspecified type will be used to determine how much time to give the VMI to restart.\nDefaults to a per object value if not specified. zero means restart immediately.\nAllowed Values: nil and 0\n+optional", + "dryRun": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. Valid values are:\n- All: all dry run stages will be processed\n+optional\n+listType=atomic", + } +} + +func (StartOptions) SwaggerDoc() map[string]string { + return map[string]string{ + "": "StartOptions may be provided on start request.", + "paused": "Indicates that VM will be started in paused state.\n+optional", + "dryRun": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. Valid values are:\n- All: all dry run stages will be processed\n+optional\n+listType=atomic", + } +} + +func (PauseOptions) SwaggerDoc() map[string]string { + return map[string]string{ + "": "PauseOptions may be provided on pause request.", + "dryRun": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest.
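The dryRun field repeated across these *Options types follows the Kubernetes API convention: the only defined directive is "All", exposed by apimachinery as metav1.DryRunAll. A minimal sketch, assuming metav1 is k8s.io/apimachinery/pkg/apis/meta/v1:

	opts := &v1.StartOptions{
		Paused: true,                       // start the VM paused, per the docs above
		DryRun: []string{metav1.DryRunAll}, // validate and process, but persist nothing
	}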
Valid values are:\n- All: all dry run stages will be processed\n+optional\n+listType=atomic", + } +} + +func (UnpauseOptions) SwaggerDoc() map[string]string { + return map[string]string{ + "": "UnpauseOptions may be provided on unpause request.", + "dryRun": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. Valid values are:\n- All: all dry run stages will be processed\n+optional\n+listType=atomic", + } +} + +func (StopOptions) SwaggerDoc() map[string]string { + return map[string]string{ + "": "StopOptions may be provided when deleting an API object.", + "gracePeriod": "this updates the VMIs terminationGracePeriodSeconds during shutdown\n+optional", + "dryRun": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. Valid values are:\n- All: all dry run stages will be processed\n+optional\n+listType=atomic", + } +} + +func (MigrateOptions) SwaggerDoc() map[string]string { + return map[string]string{ + "": "MigrateOptions may be provided on migrate request.", + "dryRun": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. Valid values are:\n- All: all dry run stages will be processed\n+optional\n+listType=atomic", + } +} + +func (VirtualMachineInstanceGuestAgentInfo) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VirtualMachineInstanceGuestAgentInfo represents information from the installed guest agent\n\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object", + "guestAgentVersion": "GAVersion is a version of currently installed guest agent", + "supportedCommands": "Return command list the guest agent supports\n+listType=atomic", + "hostname": "Hostname represents FQDN of a guest", + "os": "OS contains the guest operating system information", + "timezone": "Timezone is guest os current timezone", + "userList": "UserList is a list of active guest OS users", + "fsInfo": "FSInfo is a guest os filesystem information containing the disk mapping and disk mounts with usage", + "fsFreezeStatus": "FSFreezeStatus is the state of the fs of the guest\nit can be either frozen or thawed", + } +} + +func (GuestAgentCommandInfo) SwaggerDoc() map[string]string { + return map[string]string{ + "": "List of commands that QEMU guest agent supports", + } +} + +func (VirtualMachineInstanceGuestOSUserList) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VirtualMachineInstanceGuestOSUserList comprises the list of all active users on guest machine\n\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object", + } +} + +func (VirtualMachineInstanceGuestOSUser) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VirtualMachineGuestOSUser is the single user of the guest os", + } +} + +func (VirtualMachineInstanceFileSystemInfo) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VirtualMachineInstanceFileSystemInfo represents information regarding single guest os filesystem", + } +} + +func (VirtualMachineInstanceFileSystemList) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VirtualMachineInstanceFileSystemList comprises the list of all filesystems on guest 
machine\n\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object", + } +} + +func (VirtualMachineInstanceFileSystem) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VirtualMachineInstanceFileSystem represents guest os disk", + } +} + +func (FreezeUnfreezeTimeout) SwaggerDoc() map[string]string { + return map[string]string{ + "": "FreezeUnfreezeTimeout represent the time unfreeze will be triggered if guest was not unfrozen by unfreeze command", + } +} + +func (VirtualMachineMemoryDumpRequest) SwaggerDoc() map[string]string { + return map[string]string{ + "": "VirtualMachineMemoryDumpRequest represent the memory dump request phase and info", + "claimName": "ClaimName is the name of the pvc that will contain the memory dump", + "phase": "Phase represents the memory dump phase", + "remove": "Remove represents request of dissociating the memory dump pvc\n+optional", + "startTimestamp": "StartTimestamp represents the time the memory dump started\n+optional", + "endTimestamp": "EndTimestamp represents the time the memory dump was completed\n+optional", + "fileName": "FileName represents the name of the output file\n+optional", + "message": "Message is a detailed message about failure of the memory dump\n+optional", + } +} + +func (AddVolumeOptions) SwaggerDoc() map[string]string { + return map[string]string{ + "": "AddVolumeOptions is provided when dynamically hot plugging a volume and disk", + "name": "Name represents the name that will be used to map the\ndisk to the corresponding volume. This overrides any name\nset inside the Disk struct itself.", + "disk": "Disk represents the hotplug disk that will be plugged into the running VMI", + "volumeSource": "VolumeSource represents the source of the volume to map to the disk.", + "dryRun": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. Valid values are:\n- All: all dry run stages will be processed\n+optional\n+listType=atomic", + } +} + +func (ScreenshotOptions) SwaggerDoc() map[string]string { + return map[string]string{} +} + +func (VSOCKOptions) SwaggerDoc() map[string]string { + return map[string]string{} +} + +func (RemoveVolumeOptions) SwaggerDoc() map[string]string { + return map[string]string{ + "": "RemoveVolumeOptions is provided when dynamically hot unplugging volume and disk", + "name": "Name represents the name that maps to both the disk and volume that\nshould be removed", + "dryRun": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. 
Valid values are:\n- All: all dry run stages will be processed\n+optional\n+listType=atomic", + } +} + +func (TokenBucketRateLimiter) SwaggerDoc() map[string]string { + return map[string]string{ + "qps": "QPS indicates the maximum QPS to the apiserver from this client.\nIf it's zero, the component default will be used", + "burst": "Maximum burst for throttle.\nIf it's zero, the component default will be used", + } +} + +func (RateLimiter) SwaggerDoc() map[string]string { + return map[string]string{} +} + +func (RESTClientConfiguration) SwaggerDoc() map[string]string { + return map[string]string{ + "": "RESTClientConfiguration allows configuring certain aspects of the k8s rest client.", + "rateLimiter": "RateLimiter allows selecting and configuring different rate limiters for the k8s client.", + } +} + +func (ReloadableComponentConfiguration) SwaggerDoc() map[string]string { + return map[string]string{ + "": "ReloadableComponentConfiguration holds all generic k8s configuration options which can\nbe reloaded by components without requiring a restart.", + "restClient": "RestClient can be used to tune certain aspects of the k8s client in use.", + } +} + +func (KubeVirtConfiguration) SwaggerDoc() map[string]string { + return map[string]string{ + "": "KubeVirtConfiguration holds all kubevirt configurations", + "evictionStrategy": "EvictionStrategy defines at the cluster level if the VirtualMachineInstance should be\nmigrated instead of shut-off in case of a node drain. If the VirtualMachineInstance specific\nfield is set it overrides the cluster level one.", + "supportedGuestAgentVersions": "deprecated", + } +} + +func (SMBiosConfiguration) SwaggerDoc() map[string]string { + return map[string]string{} +} + +func (CustomProfile) SwaggerDoc() map[string]string { + return map[string]string{} +} + +func (VirtualMachineInstanceProfile) SwaggerDoc() map[string]string { + return map[string]string{ + "customProfile": "CustomProfile allows requesting an arbitrary profile for virt-launcher", + } +} + +func (SeccompConfiguration) SwaggerDoc() map[string]string { + return map[string]string{ + "": "SeccompConfiguration holds Seccomp configuration for Kubevirt components", + "virtualMachineInstanceProfile": "VirtualMachineInstanceProfile defines what profile should be used with virt-launcher. Defaults to none", + } +} + +func (TLSConfiguration) SwaggerDoc() map[string]string { + return map[string]string{ + "": "TLSConfiguration holds TLS options", + "minTLSVersion": "MinTLSVersion is a way to specify the minimum protocol version that is acceptable for TLS connections.\nProtocol versions are based on the following most common TLS configurations:\n\n https://ssl-config.mozilla.org/\n\nNote that SSLv3.0 is not a supported protocol version due to well known\nvulnerabilities such as POODLE: https://en.wikipedia.org/wiki/POODLE\n+kubebuilder:validation:Enum=VersionTLS10;VersionTLS11;VersionTLS12;VersionTLS13", + "ciphers": "+listType=set", + } +} + +func (MigrationConfiguration) SwaggerDoc() map[string]string { + return map[string]string{ + "": "MigrationConfiguration holds migration options.\nCan be overridden for specific groups of VMs through migration policies.\nVisit https://kubevirt.io/user-guide/operations/migration_policies/ for more information.", + "nodeDrainTaintKey": "NodeDrainTaintKey defines the taint key that indicates a node should be drained.\nNote: this option relies on the deprecated node taint feature.
Default: kubevirt.io/drain", + "parallelOutboundMigrationsPerNode": "ParallelOutboundMigrationsPerNode is the maximum number of concurrent outgoing live migrations\nallowed per node. Defaults to 2", + "parallelMigrationsPerCluster": "ParallelMigrationsPerCluster is the total number of concurrent live migrations\nallowed cluster-wide. Defaults to 5", + "allowAutoConverge": "AllowAutoConverge allows the platform to compromise performance/availability of VMIs to\nguarantee successful VMI live migrations. Defaults to false", + "bandwidthPerMigration": "BandwidthPerMigration limits the amount of network bandwidth live migrations are allowed to use.\nThe value is in quantity per second. Defaults to 0 (no limit)", + "completionTimeoutPerGiB": "CompletionTimeoutPerGiB is the maximum number of seconds per GiB a migration is allowed to take.\nIf a live-migration takes longer to migrate than this value multiplied by the size of the VMI,\nthe migration will be cancelled, unless AllowPostCopy is true. Defaults to 800", + "progressTimeout": "ProgressTimeout is the maximum number of seconds a live migration is allowed to make no progress.\nHitting this timeout means a migration transferred 0 data for that many seconds. The migration is\nthen considered stuck and therefore cancelled. Defaults to 150", + "unsafeMigrationOverride": "UnsafeMigrationOverride allows live migrations to occur even if the compatibility check\nindicates the migration will be unsafe to the guest. Defaults to false", + "allowPostCopy": "AllowPostCopy enables post-copy live migrations. Such migrations allow even the busiest VMIs\nto successfully live-migrate. However, events like a network failure can cause a VMI crash.\nIf set to true, migrations will still start in pre-copy, but switch to post-copy when\nCompletionTimeoutPerGiB triggers. Defaults to false", + "disableTLS": "When set to true, DisableTLS will disable the additional layer of live migration encryption\nprovided by KubeVirt. This is usually a bad idea. Defaults to false", + "network": "Network is the name of the CNI network to use for live migrations. By default, migrations go\nthrough the pod network.", + } +} + +func (DiskVerification) SwaggerDoc() map[string]string { + return map[string]string{ + "": "DiskVerification holds container disks verification limits", + } +} + +func (DeveloperConfiguration) SwaggerDoc() map[string]string { + return map[string]string{ + "": "DeveloperConfiguration holds developer options", + "featureGates": "FeatureGates is the list of experimental features to enable. Defaults to none", + "pvcTolerateLessSpaceUpToPercent": "LessPVCSpaceToleration determines how much smaller, in percentage, disk PVCs are\nallowed to be compared to the requested size (to account for various overheads).\nDefaults to 10", + "minimumReservePVCBytes": "MinimumReservePVCBytes is the amount of space, in bytes, to leave unused on disks.\nDefaults to 131072 (128KiB)", + "memoryOvercommit": "MemoryOvercommit is the percentage of memory we want to give VMIs compared to the amount\ngiven to its parent pod (virt-launcher). For example, a value of 102 means the VMI will\n\"see\" 2% more memory than its parent pod. Values under 100 are effectively \"undercommits\".\nOvercommits can lead to memory exhaustion, which in turn can lead to crashes.
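The CompletionTimeoutPerGiB rule above is plain arithmetic: a migration is cancelled once its runtime exceeds the per-GiB budget multiplied by the VMI size, unless post-copy is allowed. A small sketch of that computation; the function and parameter names are illustrative, not part of this API:

	// migrationDeadline returns how long a live migration may run before being
	// cancelled, per the CompletionTimeoutPerGiB documentation above.
	func migrationDeadline(completionTimeoutPerGiB, vmiSizeGiB int64) time.Duration {
		return time.Duration(completionTimeoutPerGiB*vmiSizeGiB) * time.Second
	}

With the documented default of 800 seconds per GiB, an 8 GiB VMI may run for 6400 seconds before cancellation.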
Use carefully.\nDefaults to 100", + "nodeSelectors": "NodeSelectors allows restricting VMI creation to nodes that match a set of labels.\nDefaults to none", + "useEmulation": "UseEmulation can be set to true to allow fallback to software emulation\nin case hardware-assisted emulation is not available. Defaults to false", + "cpuAllocationRatio": "For each requested virtual CPU, CPUAllocationRatio defines how much physical CPU to request per VMI\nfrom the hosting node. The value is in fraction of a CPU thread (or core on non-hyperthreaded nodes).\nFor example, a value of 1 means 1 physical CPU thread per VMI CPU thread.\nA value of 100 would be 1% of a physical thread allocated for each requested VMI thread.\nThis option has no effect on VMIs that request dedicated CPUs. More information at:\nhttps://kubevirt.io/user-guide/operations/node_overcommit/#node-cpu-allocation-ratio\nDefaults to 10", + "minimumClusterTSCFrequency": "Allow overriding the automatically determined minimum TSC frequency of the cluster\nand fixate the minimum to this frequency.", + } +} + +func (LogVerbosity) SwaggerDoc() map[string]string { + return map[string]string{ + "": "LogVerbosity sets log verbosity level of various components", + "nodeVerbosity": "NodeVerbosity represents a map of nodes with a specific verbosity level", + } +} + +func (PermittedHostDevices) SwaggerDoc() map[string]string { + return map[string]string{ + "": "PermittedHostDevices holds information about devices allowed for passthrough", + "pciHostDevices": "+listType=atomic", + "mediatedDevices": "+listType=atomic", + } +} + +func (PciHostDevice) SwaggerDoc() map[string]string { + return map[string]string{ + "": "PciHostDevice represents a host PCI device allowed for passthrough", + "pciVendorSelector": "The vendor_id:product_id tuple of the PCI device", + "resourceName": "The name of the resource that is representing the device. Exposed by\na device plugin and requested by VMs. Typically of the form\nvendor.com/product_name", + "externalResourceProvider": "If true, KubeVirt will leave the allocation and monitoring to an\nexternal device plugin", + } +} + +func (MediatedHostDevice) SwaggerDoc() map[string]string { + return map[string]string{ + "": "MediatedHostDevice represents a host mediated device allowed for passthrough", + } +} + +func (MediatedDevicesConfiguration) SwaggerDoc() map[string]string { + return map[string]string{ + "": "MediatedDevicesConfiguration holds information about MDEV types to be defined, if available", + "mediatedDevicesTypes": "Deprecated. Use mediatedDeviceTypes instead.\n+optional\n+listType=atomic", + "mediatedDeviceTypes": "+optional\n+listType=atomic", + "nodeMediatedDeviceTypes": "+optional\n+listType=atomic", + } +} + +func (NodeMediatedDeviceTypesConfig) SwaggerDoc() map[string]string { + return map[string]string{ + "": "NodeMediatedDeviceTypesConfig holds information about MDEV types to be defined in a specific node that matches the NodeSelector field.\n+k8s:openapi-gen=true", + "nodeSelector": "NodeSelector is a selector which must be true for the vmi to fit on a node.\nSelector which must match a node's labels for the vmi to be scheduled on that node.\nMore info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + "mediatedDevicesTypes": "Deprecated.
Use mediatedDeviceTypes instead.\n+optional\n+listType=atomic", + "mediatedDeviceTypes": "+optional\n+listType=atomic", + } +} + +func (NetworkConfiguration) SwaggerDoc() map[string]string { + return map[string]string{ + "": "NetworkConfiguration holds network options", + } +} + +func (GuestAgentPing) SwaggerDoc() map[string]string { + return map[string]string{ + "": "GuestAgentPing configures the guest-agent based ping probe", + } +} + +func (ProfilerResult) SwaggerDoc() map[string]string { + return map[string]string{} +} + +func (ClusterProfilerResults) SwaggerDoc() map[string]string { + return map[string]string{} +} + +func (ClusterProfilerRequest) SwaggerDoc() map[string]string { + return map[string]string{} +} + +func (InstancetypeMatcher) SwaggerDoc() map[string]string { + return map[string]string{ + "": "InstancetypeMatcher references an instancetype that is used to fill fields in the VMI template.", + "name": "Name is the name of the VirtualMachineInstancetype or VirtualMachineClusterInstancetype\n\n+optional", + "kind": "Kind specifies which instancetype resource is referenced.\nAllowed values are: \"VirtualMachineInstancetype\" and \"VirtualMachineClusterInstancetype\".\nIf not specified, \"VirtualMachineClusterInstancetype\" is used by default.\n\n+optional", + "revisionName": "RevisionName specifies a ControllerRevision containing a specific copy of the\nVirtualMachineInstancetype or VirtualMachineClusterInstancetype to be used. This is initially\ncaptured the first time the instancetype is applied to the VirtualMachineInstance.\n\n+optional", + "inferFromVolume": "InferFromVolume lists the name of a volume that should be used to infer or discover the instancetype\nto be used through known annotations on the underlying resource. Once applied to the InstancetypeMatcher\nthis field is removed.\n\n+optional", + } +} + +func (PreferenceMatcher) SwaggerDoc() map[string]string { + return map[string]string{ + "": "PreferenceMatcher references a set of preferences that is used to fill fields in the VMI template.", + "name": "Name is the name of the VirtualMachinePreference or VirtualMachineClusterPreference\n\n+optional", + "kind": "Kind specifies which preference resource is referenced.\nAllowed values are: \"VirtualMachinePreference\" and \"VirtualMachineClusterPreference\".\nIf not specified, \"VirtualMachineClusterPreference\" is used by default.\n\n+optional", + "revisionName": "RevisionName specifies a ControllerRevision containing a specific copy of the\nVirtualMachinePreference or VirtualMachineClusterPreference to be used. This is\ninitially captured the first time the preference is applied to the VirtualMachineInstance.\n\n+optional", + "inferFromVolume": "InferFromVolume lists the name of a volume that should be used to infer or discover the preference\nto be used through known annotations on the underlying resource. Once applied to the PreferenceMatcher\nthis field is removed.\n\n+optional", + } +} diff --git a/vendor/kubevirt.io/api/core/v1/zz_generated.defaults.go b/vendor/kubevirt.io/api/core/v1/zz_generated.defaults.go new file mode 100644 index 000000000..08972aea2 --- /dev/null +++ b/vendor/kubevirt.io/api/core/v1/zz_generated.defaults.go @@ -0,0 +1,566 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2023 The KubeVirt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&VirtualMachine{}, func(obj interface{}) { SetObjectDefaults_VirtualMachine(obj.(*VirtualMachine)) }) + scheme.AddTypeDefaultingFunc(&VirtualMachineInstance{}, func(obj interface{}) { SetObjectDefaults_VirtualMachineInstance(obj.(*VirtualMachineInstance)) }) + scheme.AddTypeDefaultingFunc(&VirtualMachineInstanceList{}, func(obj interface{}) { SetObjectDefaults_VirtualMachineInstanceList(obj.(*VirtualMachineInstanceList)) }) + scheme.AddTypeDefaultingFunc(&VirtualMachineInstancePreset{}, func(obj interface{}) { + SetObjectDefaults_VirtualMachineInstancePreset(obj.(*VirtualMachineInstancePreset)) + }) + scheme.AddTypeDefaultingFunc(&VirtualMachineInstancePresetList{}, func(obj interface{}) { + SetObjectDefaults_VirtualMachineInstancePresetList(obj.(*VirtualMachineInstancePresetList)) + }) + scheme.AddTypeDefaultingFunc(&VirtualMachineInstanceReplicaSet{}, func(obj interface{}) { + SetObjectDefaults_VirtualMachineInstanceReplicaSet(obj.(*VirtualMachineInstanceReplicaSet)) + }) + scheme.AddTypeDefaultingFunc(&VirtualMachineInstanceReplicaSetList{}, func(obj interface{}) { + SetObjectDefaults_VirtualMachineInstanceReplicaSetList(obj.(*VirtualMachineInstanceReplicaSetList)) + }) + scheme.AddTypeDefaultingFunc(&VirtualMachineList{}, func(obj interface{}) { SetObjectDefaults_VirtualMachineList(obj.(*VirtualMachineList)) }) + return nil +} + +func SetObjectDefaults_VirtualMachine(in *VirtualMachine) { + if in.Spec.Template != nil { + if in.Spec.Template.Spec.Domain.Firmware != nil { + SetDefaults_Firmware(in.Spec.Template.Spec.Domain.Firmware) + } + if in.Spec.Template.Spec.Domain.Clock != nil { + if in.Spec.Template.Spec.Domain.Clock.Timer != nil { + if in.Spec.Template.Spec.Domain.Clock.Timer.HPET != nil { + SetDefaults_HPETTimer(in.Spec.Template.Spec.Domain.Clock.Timer.HPET) + } + if in.Spec.Template.Spec.Domain.Clock.Timer.KVM != nil { + SetDefaults_KVMTimer(in.Spec.Template.Spec.Domain.Clock.Timer.KVM) + } + if in.Spec.Template.Spec.Domain.Clock.Timer.PIT != nil { + SetDefaults_PITTimer(in.Spec.Template.Spec.Domain.Clock.Timer.PIT) + } + if in.Spec.Template.Spec.Domain.Clock.Timer.RTC != nil { + SetDefaults_RTCTimer(in.Spec.Template.Spec.Domain.Clock.Timer.RTC) + } + if in.Spec.Template.Spec.Domain.Clock.Timer.Hyperv != nil { + SetDefaults_HypervTimer(in.Spec.Template.Spec.Domain.Clock.Timer.Hyperv) + } + } + } + if in.Spec.Template.Spec.Domain.Features != nil { + SetDefaults_FeatureState(&in.Spec.Template.Spec.Domain.Features.ACPI) + if in.Spec.Template.Spec.Domain.Features.APIC != nil { + SetDefaults_FeatureAPIC(in.Spec.Template.Spec.Domain.Features.APIC) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv != nil { + if in.Spec.Template.Spec.Domain.Features.Hyperv.Relaxed != nil { + 
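	// NOTE: hedged usage sketch, not generated code. Callers typically register
	// these defaulters into a runtime.Scheme and let apimachinery dispatch them:
	//
	//	scheme := runtime.NewScheme()
	//	if err := RegisterDefaults(scheme); err != nil {
	//		panic(err) // registration only fails on programmer error
	//	}
	//	vm := &VirtualMachine{}
	//	scheme.Default(vm) // invokes SetObjectDefaults_VirtualMachine(vm)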
SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Hyperv.Relaxed) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.VAPIC != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Hyperv.VAPIC) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.Spinlocks != nil { + SetDefaults_FeatureSpinlocks(in.Spec.Template.Spec.Domain.Features.Hyperv.Spinlocks) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.VPIndex != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Hyperv.VPIndex) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.Runtime != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Hyperv.Runtime) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.SyNIC != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Hyperv.SyNIC) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.SyNICTimer != nil { + SetDefaults_SyNICTimer(in.Spec.Template.Spec.Domain.Features.Hyperv.SyNICTimer) + if in.Spec.Template.Spec.Domain.Features.Hyperv.SyNICTimer.Direct != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Hyperv.SyNICTimer.Direct) + } + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.Reset != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Hyperv.Reset) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.VendorID != nil { + SetDefaults_FeatureVendorID(in.Spec.Template.Spec.Domain.Features.Hyperv.VendorID) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.Frequencies != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Hyperv.Frequencies) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.Reenlightenment != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Hyperv.Reenlightenment) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.TLBFlush != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Hyperv.TLBFlush) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.IPI != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Hyperv.IPI) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.EVMCS != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Hyperv.EVMCS) + } + } + if in.Spec.Template.Spec.Domain.Features.SMM != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.SMM) + } + if in.Spec.Template.Spec.Domain.Features.Pvspinlock != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Pvspinlock) + } + } + for i := range in.Spec.Template.Spec.Domain.Devices.Disks { + a := &in.Spec.Template.Spec.Domain.Devices.Disks[i] + SetDefaults_DiskDevice(&a.DiskDevice) + if a.DiskDevice.CDRom != nil { + SetDefaults_CDRomTarget(a.DiskDevice.CDRom) + } + if a.BlockSize != nil { + if a.BlockSize.MatchVolume != nil { + SetDefaults_FeatureState(a.BlockSize.MatchVolume) + } + } + } + if in.Spec.Template.Spec.Domain.Devices.Watchdog != nil { + SetDefaults_Watchdog(in.Spec.Template.Spec.Domain.Devices.Watchdog) + if in.Spec.Template.Spec.Domain.Devices.Watchdog.WatchdogDevice.I6300ESB != nil { + SetDefaults_I6300ESBWatchdog(in.Spec.Template.Spec.Domain.Devices.Watchdog.WatchdogDevice.I6300ESB) + } + } + for i := range in.Spec.Template.Spec.Domain.Devices.GPUs { + a := &in.Spec.Template.Spec.Domain.Devices.GPUs[i] + if a.VirtualGPUOptions != nil { + if a.VirtualGPUOptions.Display != nil { + if a.VirtualGPUOptions.Display.RamFB != nil { + SetDefaults_FeatureState(a.VirtualGPUOptions.Display.RamFB) + } + } + } + } + if 
in.Spec.Template.Spec.LivenessProbe != nil { + SetDefaults_Probe(in.Spec.Template.Spec.LivenessProbe) + } + if in.Spec.Template.Spec.ReadinessProbe != nil { + SetDefaults_Probe(in.Spec.Template.Spec.ReadinessProbe) + } + } + for i := range in.Status.VolumeRequests { + a := &in.Status.VolumeRequests[i] + if a.AddVolumeOptions != nil { + if a.AddVolumeOptions.Disk != nil { + SetDefaults_DiskDevice(&a.AddVolumeOptions.Disk.DiskDevice) + if a.AddVolumeOptions.Disk.DiskDevice.CDRom != nil { + SetDefaults_CDRomTarget(a.AddVolumeOptions.Disk.DiskDevice.CDRom) + } + if a.AddVolumeOptions.Disk.BlockSize != nil { + if a.AddVolumeOptions.Disk.BlockSize.MatchVolume != nil { + SetDefaults_FeatureState(a.AddVolumeOptions.Disk.BlockSize.MatchVolume) + } + } + } + } + } +} + +func SetObjectDefaults_VirtualMachineInstance(in *VirtualMachineInstance) { + SetDefaults_VirtualMachineInstance(in) + if in.Spec.Domain.Firmware != nil { + SetDefaults_Firmware(in.Spec.Domain.Firmware) + } + if in.Spec.Domain.Clock != nil { + if in.Spec.Domain.Clock.Timer != nil { + if in.Spec.Domain.Clock.Timer.HPET != nil { + SetDefaults_HPETTimer(in.Spec.Domain.Clock.Timer.HPET) + } + if in.Spec.Domain.Clock.Timer.KVM != nil { + SetDefaults_KVMTimer(in.Spec.Domain.Clock.Timer.KVM) + } + if in.Spec.Domain.Clock.Timer.PIT != nil { + SetDefaults_PITTimer(in.Spec.Domain.Clock.Timer.PIT) + } + if in.Spec.Domain.Clock.Timer.RTC != nil { + SetDefaults_RTCTimer(in.Spec.Domain.Clock.Timer.RTC) + } + if in.Spec.Domain.Clock.Timer.Hyperv != nil { + SetDefaults_HypervTimer(in.Spec.Domain.Clock.Timer.Hyperv) + } + } + } + if in.Spec.Domain.Features != nil { + SetDefaults_FeatureState(&in.Spec.Domain.Features.ACPI) + if in.Spec.Domain.Features.APIC != nil { + SetDefaults_FeatureAPIC(in.Spec.Domain.Features.APIC) + } + if in.Spec.Domain.Features.Hyperv != nil { + if in.Spec.Domain.Features.Hyperv.Relaxed != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Hyperv.Relaxed) + } + if in.Spec.Domain.Features.Hyperv.VAPIC != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Hyperv.VAPIC) + } + if in.Spec.Domain.Features.Hyperv.Spinlocks != nil { + SetDefaults_FeatureSpinlocks(in.Spec.Domain.Features.Hyperv.Spinlocks) + } + if in.Spec.Domain.Features.Hyperv.VPIndex != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Hyperv.VPIndex) + } + if in.Spec.Domain.Features.Hyperv.Runtime != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Hyperv.Runtime) + } + if in.Spec.Domain.Features.Hyperv.SyNIC != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Hyperv.SyNIC) + } + if in.Spec.Domain.Features.Hyperv.SyNICTimer != nil { + SetDefaults_SyNICTimer(in.Spec.Domain.Features.Hyperv.SyNICTimer) + if in.Spec.Domain.Features.Hyperv.SyNICTimer.Direct != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Hyperv.SyNICTimer.Direct) + } + } + if in.Spec.Domain.Features.Hyperv.Reset != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Hyperv.Reset) + } + if in.Spec.Domain.Features.Hyperv.VendorID != nil { + SetDefaults_FeatureVendorID(in.Spec.Domain.Features.Hyperv.VendorID) + } + if in.Spec.Domain.Features.Hyperv.Frequencies != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Hyperv.Frequencies) + } + if in.Spec.Domain.Features.Hyperv.Reenlightenment != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Hyperv.Reenlightenment) + } + if in.Spec.Domain.Features.Hyperv.TLBFlush != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Hyperv.TLBFlush) + } + if in.Spec.Domain.Features.Hyperv.IPI != nil { + 
SetDefaults_FeatureState(in.Spec.Domain.Features.Hyperv.IPI) + } + if in.Spec.Domain.Features.Hyperv.EVMCS != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Hyperv.EVMCS) + } + } + if in.Spec.Domain.Features.SMM != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.SMM) + } + if in.Spec.Domain.Features.Pvspinlock != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Pvspinlock) + } + } + for i := range in.Spec.Domain.Devices.Disks { + a := &in.Spec.Domain.Devices.Disks[i] + SetDefaults_DiskDevice(&a.DiskDevice) + if a.DiskDevice.CDRom != nil { + SetDefaults_CDRomTarget(a.DiskDevice.CDRom) + } + if a.BlockSize != nil { + if a.BlockSize.MatchVolume != nil { + SetDefaults_FeatureState(a.BlockSize.MatchVolume) + } + } + } + if in.Spec.Domain.Devices.Watchdog != nil { + SetDefaults_Watchdog(in.Spec.Domain.Devices.Watchdog) + if in.Spec.Domain.Devices.Watchdog.WatchdogDevice.I6300ESB != nil { + SetDefaults_I6300ESBWatchdog(in.Spec.Domain.Devices.Watchdog.WatchdogDevice.I6300ESB) + } + } + for i := range in.Spec.Domain.Devices.GPUs { + a := &in.Spec.Domain.Devices.GPUs[i] + if a.VirtualGPUOptions != nil { + if a.VirtualGPUOptions.Display != nil { + if a.VirtualGPUOptions.Display.RamFB != nil { + SetDefaults_FeatureState(a.VirtualGPUOptions.Display.RamFB) + } + } + } + } + if in.Spec.LivenessProbe != nil { + SetDefaults_Probe(in.Spec.LivenessProbe) + } + if in.Spec.ReadinessProbe != nil { + SetDefaults_Probe(in.Spec.ReadinessProbe) + } +} + +func SetObjectDefaults_VirtualMachineInstanceList(in *VirtualMachineInstanceList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_VirtualMachineInstance(a) + } +} + +func SetObjectDefaults_VirtualMachineInstancePreset(in *VirtualMachineInstancePreset) { + if in.Spec.Domain != nil { + if in.Spec.Domain.Firmware != nil { + SetDefaults_Firmware(in.Spec.Domain.Firmware) + } + if in.Spec.Domain.Clock != nil { + if in.Spec.Domain.Clock.Timer != nil { + if in.Spec.Domain.Clock.Timer.HPET != nil { + SetDefaults_HPETTimer(in.Spec.Domain.Clock.Timer.HPET) + } + if in.Spec.Domain.Clock.Timer.KVM != nil { + SetDefaults_KVMTimer(in.Spec.Domain.Clock.Timer.KVM) + } + if in.Spec.Domain.Clock.Timer.PIT != nil { + SetDefaults_PITTimer(in.Spec.Domain.Clock.Timer.PIT) + } + if in.Spec.Domain.Clock.Timer.RTC != nil { + SetDefaults_RTCTimer(in.Spec.Domain.Clock.Timer.RTC) + } + if in.Spec.Domain.Clock.Timer.Hyperv != nil { + SetDefaults_HypervTimer(in.Spec.Domain.Clock.Timer.Hyperv) + } + } + } + if in.Spec.Domain.Features != nil { + SetDefaults_FeatureState(&in.Spec.Domain.Features.ACPI) + if in.Spec.Domain.Features.APIC != nil { + SetDefaults_FeatureAPIC(in.Spec.Domain.Features.APIC) + } + if in.Spec.Domain.Features.Hyperv != nil { + if in.Spec.Domain.Features.Hyperv.Relaxed != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Hyperv.Relaxed) + } + if in.Spec.Domain.Features.Hyperv.VAPIC != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Hyperv.VAPIC) + } + if in.Spec.Domain.Features.Hyperv.Spinlocks != nil { + SetDefaults_FeatureSpinlocks(in.Spec.Domain.Features.Hyperv.Spinlocks) + } + if in.Spec.Domain.Features.Hyperv.VPIndex != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Hyperv.VPIndex) + } + if in.Spec.Domain.Features.Hyperv.Runtime != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Hyperv.Runtime) + } + if in.Spec.Domain.Features.Hyperv.SyNIC != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Hyperv.SyNIC) + } + if in.Spec.Domain.Features.Hyperv.SyNICTimer != nil { + 
SetDefaults_SyNICTimer(in.Spec.Domain.Features.Hyperv.SyNICTimer) + if in.Spec.Domain.Features.Hyperv.SyNICTimer.Direct != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Hyperv.SyNICTimer.Direct) + } + } + if in.Spec.Domain.Features.Hyperv.Reset != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Hyperv.Reset) + } + if in.Spec.Domain.Features.Hyperv.VendorID != nil { + SetDefaults_FeatureVendorID(in.Spec.Domain.Features.Hyperv.VendorID) + } + if in.Spec.Domain.Features.Hyperv.Frequencies != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Hyperv.Frequencies) + } + if in.Spec.Domain.Features.Hyperv.Reenlightenment != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Hyperv.Reenlightenment) + } + if in.Spec.Domain.Features.Hyperv.TLBFlush != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Hyperv.TLBFlush) + } + if in.Spec.Domain.Features.Hyperv.IPI != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Hyperv.IPI) + } + if in.Spec.Domain.Features.Hyperv.EVMCS != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Hyperv.EVMCS) + } + } + if in.Spec.Domain.Features.SMM != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.SMM) + } + if in.Spec.Domain.Features.Pvspinlock != nil { + SetDefaults_FeatureState(in.Spec.Domain.Features.Pvspinlock) + } + } + for i := range in.Spec.Domain.Devices.Disks { + a := &in.Spec.Domain.Devices.Disks[i] + SetDefaults_DiskDevice(&a.DiskDevice) + if a.DiskDevice.CDRom != nil { + SetDefaults_CDRomTarget(a.DiskDevice.CDRom) + } + if a.BlockSize != nil { + if a.BlockSize.MatchVolume != nil { + SetDefaults_FeatureState(a.BlockSize.MatchVolume) + } + } + } + if in.Spec.Domain.Devices.Watchdog != nil { + SetDefaults_Watchdog(in.Spec.Domain.Devices.Watchdog) + if in.Spec.Domain.Devices.Watchdog.WatchdogDevice.I6300ESB != nil { + SetDefaults_I6300ESBWatchdog(in.Spec.Domain.Devices.Watchdog.WatchdogDevice.I6300ESB) + } + } + for i := range in.Spec.Domain.Devices.GPUs { + a := &in.Spec.Domain.Devices.GPUs[i] + if a.VirtualGPUOptions != nil { + if a.VirtualGPUOptions.Display != nil { + if a.VirtualGPUOptions.Display.RamFB != nil { + SetDefaults_FeatureState(a.VirtualGPUOptions.Display.RamFB) + } + } + } + } + } +} + +func SetObjectDefaults_VirtualMachineInstancePresetList(in *VirtualMachineInstancePresetList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_VirtualMachineInstancePreset(a) + } +} + +func SetObjectDefaults_VirtualMachineInstanceReplicaSet(in *VirtualMachineInstanceReplicaSet) { + if in.Spec.Template != nil { + if in.Spec.Template.Spec.Domain.Firmware != nil { + SetDefaults_Firmware(in.Spec.Template.Spec.Domain.Firmware) + } + if in.Spec.Template.Spec.Domain.Clock != nil { + if in.Spec.Template.Spec.Domain.Clock.Timer != nil { + if in.Spec.Template.Spec.Domain.Clock.Timer.HPET != nil { + SetDefaults_HPETTimer(in.Spec.Template.Spec.Domain.Clock.Timer.HPET) + } + if in.Spec.Template.Spec.Domain.Clock.Timer.KVM != nil { + SetDefaults_KVMTimer(in.Spec.Template.Spec.Domain.Clock.Timer.KVM) + } + if in.Spec.Template.Spec.Domain.Clock.Timer.PIT != nil { + SetDefaults_PITTimer(in.Spec.Template.Spec.Domain.Clock.Timer.PIT) + } + if in.Spec.Template.Spec.Domain.Clock.Timer.RTC != nil { + SetDefaults_RTCTimer(in.Spec.Template.Spec.Domain.Clock.Timer.RTC) + } + if in.Spec.Template.Spec.Domain.Clock.Timer.Hyperv != nil { + SetDefaults_HypervTimer(in.Spec.Template.Spec.Domain.Clock.Timer.Hyperv) + } + } + } + if in.Spec.Template.Spec.Domain.Features != nil { + 
SetDefaults_FeatureState(&in.Spec.Template.Spec.Domain.Features.ACPI) + if in.Spec.Template.Spec.Domain.Features.APIC != nil { + SetDefaults_FeatureAPIC(in.Spec.Template.Spec.Domain.Features.APIC) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv != nil { + if in.Spec.Template.Spec.Domain.Features.Hyperv.Relaxed != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Hyperv.Relaxed) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.VAPIC != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Hyperv.VAPIC) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.Spinlocks != nil { + SetDefaults_FeatureSpinlocks(in.Spec.Template.Spec.Domain.Features.Hyperv.Spinlocks) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.VPIndex != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Hyperv.VPIndex) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.Runtime != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Hyperv.Runtime) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.SyNIC != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Hyperv.SyNIC) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.SyNICTimer != nil { + SetDefaults_SyNICTimer(in.Spec.Template.Spec.Domain.Features.Hyperv.SyNICTimer) + if in.Spec.Template.Spec.Domain.Features.Hyperv.SyNICTimer.Direct != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Hyperv.SyNICTimer.Direct) + } + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.Reset != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Hyperv.Reset) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.VendorID != nil { + SetDefaults_FeatureVendorID(in.Spec.Template.Spec.Domain.Features.Hyperv.VendorID) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.Frequencies != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Hyperv.Frequencies) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.Reenlightenment != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Hyperv.Reenlightenment) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.TLBFlush != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Hyperv.TLBFlush) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.IPI != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Hyperv.IPI) + } + if in.Spec.Template.Spec.Domain.Features.Hyperv.EVMCS != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Hyperv.EVMCS) + } + } + if in.Spec.Template.Spec.Domain.Features.SMM != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.SMM) + } + if in.Spec.Template.Spec.Domain.Features.Pvspinlock != nil { + SetDefaults_FeatureState(in.Spec.Template.Spec.Domain.Features.Pvspinlock) + } + } + for i := range in.Spec.Template.Spec.Domain.Devices.Disks { + a := &in.Spec.Template.Spec.Domain.Devices.Disks[i] + SetDefaults_DiskDevice(&a.DiskDevice) + if a.DiskDevice.CDRom != nil { + SetDefaults_CDRomTarget(a.DiskDevice.CDRom) + } + if a.BlockSize != nil { + if a.BlockSize.MatchVolume != nil { + SetDefaults_FeatureState(a.BlockSize.MatchVolume) + } + } + } + if in.Spec.Template.Spec.Domain.Devices.Watchdog != nil { + SetDefaults_Watchdog(in.Spec.Template.Spec.Domain.Devices.Watchdog) + if in.Spec.Template.Spec.Domain.Devices.Watchdog.WatchdogDevice.I6300ESB != nil { + SetDefaults_I6300ESBWatchdog(in.Spec.Template.Spec.Domain.Devices.Watchdog.WatchdogDevice.I6300ESB) + } + } + 
for i := range in.Spec.Template.Spec.Domain.Devices.GPUs { + a := &in.Spec.Template.Spec.Domain.Devices.GPUs[i] + if a.VirtualGPUOptions != nil { + if a.VirtualGPUOptions.Display != nil { + if a.VirtualGPUOptions.Display.RamFB != nil { + SetDefaults_FeatureState(a.VirtualGPUOptions.Display.RamFB) + } + } + } + } + if in.Spec.Template.Spec.LivenessProbe != nil { + SetDefaults_Probe(in.Spec.Template.Spec.LivenessProbe) + } + if in.Spec.Template.Spec.ReadinessProbe != nil { + SetDefaults_Probe(in.Spec.Template.Spec.ReadinessProbe) + } + } +} + +func SetObjectDefaults_VirtualMachineInstanceReplicaSetList(in *VirtualMachineInstanceReplicaSetList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_VirtualMachineInstanceReplicaSet(a) + } +} + +func SetObjectDefaults_VirtualMachineList(in *VirtualMachineList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_VirtualMachine(a) + } +} diff --git a/vendor/kubevirt.io/containerized-data-importer-api/LICENSE b/vendor/kubevirt.io/containerized-data-importer-api/LICENSE new file mode 100644 index 000000000..549d874d4 --- /dev/null +++ b/vendor/kubevirt.io/containerized-data-importer-api/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2017 The KubeVirt Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/register.go b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/register.go new file mode 100644 index 000000000..77f5af700 --- /dev/null +++ b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/register.go @@ -0,0 +1,6 @@ +package core + +const ( + // GroupName to hold the string name for the cdi project + GroupName = "cdi.kubevirt.io" +) diff --git a/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/doc.go b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/doc.go new file mode 100644 index 000000000..d6d23951c --- /dev/null +++ b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/doc.go @@ -0,0 +1,6 @@ +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +// Package v1beta1 is the v1beta1 version of the API. 
+// +groupName=cdi.kubevirt.io +package v1beta1 diff --git a/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/register.go b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/register.go new file mode 100644 index 000000000..3b7fd892c --- /dev/null +++ b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/register.go @@ -0,0 +1,54 @@ +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "kubevirt.io/containerized-data-importer-api/pkg/apis/core" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: core.GroupName, Version: "v1beta1"} + +//CDIGroupVersionKind group version kind +var CDIGroupVersionKind = schema.GroupVersionKind{Group: SchemeGroupVersion.Group, Version: SchemeGroupVersion.Version, Kind: "CDI"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder tbd + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme tbd + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &DataVolume{}, + &DataVolumeList{}, + &CDIConfig{}, + &CDIConfigList{}, + &CDI{}, + &CDIList{}, + &StorageProfile{}, + &StorageProfileList{}, + &DataSource{}, + &DataSourceList{}, + &DataImportCron{}, + &DataImportCronList{}, + &ObjectTransfer{}, + &ObjectTransferList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types.go b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types.go new file mode 100644 index 000000000..7194d4014 --- /dev/null +++ b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types.go @@ -0,0 +1,832 @@ +/* +Copyright 2018 The CDI Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + ocpconfigv1 "github.com/openshift/api/config/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + sdkapi "kubevirt.io/controller-lifecycle-operator-sdk/api" +) + +// DataVolume is an abstraction on top of PersistentVolumeClaims to allow easy population of those PersistentVolumeClaims with relation to VirtualMachines +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:storageversion +// +kubebuilder:resource:shortName=dv;dvs,categories=all +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="The phase the data volume is in" +// +kubebuilder:printcolumn:name="Progress",type="string",JSONPath=".status.progress",description="Transfer progress in percentage if known, N/A otherwise" +// +kubebuilder:printcolumn:name="Restarts",type="integer",JSONPath=".status.restartCount",description="The number of times the transfer has been restarted." +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +type DataVolume struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DataVolumeSpec `json:"spec"` + Status DataVolumeStatus `json:"status,omitempty"` +} + +// DataVolumeSpec defines the DataVolume type specification +type DataVolumeSpec struct { + //Source is the src of the data for the requested DataVolume + // +optional + Source *DataVolumeSource `json:"source,omitempty"` + //SourceRef is an indirect reference to the source of data for the requested DataVolume + // +optional + SourceRef *DataVolumeSourceRef `json:"sourceRef,omitempty"` + //PVC is the PVC specification + PVC *corev1.PersistentVolumeClaimSpec `json:"pvc,omitempty"` + // Storage is the requested storage specification + Storage *StorageSpec `json:"storage,omitempty"` + //PriorityClassName for Importer, Cloner and Uploader pod + PriorityClassName string `json:"priorityClassName,omitempty"` + //DataVolumeContentType options: "kubevirt", "archive" + // +kubebuilder:validation:Enum="kubevirt";"archive" + ContentType DataVolumeContentType `json:"contentType,omitempty"` + // Checkpoints is a list of DataVolumeCheckpoints, representing stages in a multistage import. + Checkpoints []DataVolumeCheckpoint `json:"checkpoints,omitempty"` + // FinalCheckpoint indicates whether the current DataVolumeCheckpoint is the final checkpoint. + FinalCheckpoint bool `json:"finalCheckpoint,omitempty"` + // Preallocation controls whether storage for DataVolumes should be allocated in advance. + Preallocation *bool `json:"preallocation,omitempty"` +} + +// StorageSpec defines the Storage type specification +type StorageSpec struct { + // AccessModes contains the desired access modes the volume should have. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + // +optional + AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes,omitempty"` + // A label query over volumes to consider for binding. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty"` + // Resources represents the minimum resources the volume should have. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty"` + // VolumeName is the binding reference to the PersistentVolume backing this claim. 
+ // +optional + VolumeName string `json:"volumeName,omitempty"` + // Name of the StorageClass required by the claim. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + // +optional + StorageClassName *string `json:"storageClassName,omitempty"` + // volumeMode defines what type of volume is required by the claim. + // Value of Filesystem is implied when not included in claim spec. + // +optional + VolumeMode *corev1.PersistentVolumeMode `json:"volumeMode,omitempty"` + // This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. + // If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field. + // +optional + DataSource *corev1.TypedLocalObjectReference `json:"dataSource,omitempty"` + // Specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. + // This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. + // There are two important differences between DataSource and DataSourceRef: + // * While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. + // * While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified. + // (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + // +optional + DataSourceRef *corev1.TypedLocalObjectReference `json:"dataSourceRef,omitempty"` +} + +// DataVolumeCheckpoint defines a stage in a warm migration. +type DataVolumeCheckpoint struct { + // Previous is the identifier of the snapshot from the previous checkpoint. + Previous string `json:"previous"` + // Current is the identifier of the snapshot created for this checkpoint. 
+ Current string `json:"current"` +} + +// DataVolumeContentType represents the types of the imported data +type DataVolumeContentType string + +const ( + // DataVolumeKubeVirt is the content-type of the imported file, defaults to kubevirt + DataVolumeKubeVirt DataVolumeContentType = "kubevirt" + // DataVolumeArchive is the content-type to specify if there is a need to extract the imported archive + DataVolumeArchive DataVolumeContentType = "archive" +) + +// DataVolumeSource represents the source for our Data Volume, this can be HTTP, Imageio, S3, Registry or an existing PVC +type DataVolumeSource struct { + HTTP *DataVolumeSourceHTTP `json:"http,omitempty"` + S3 *DataVolumeSourceS3 `json:"s3,omitempty"` + Registry *DataVolumeSourceRegistry `json:"registry,omitempty"` + PVC *DataVolumeSourcePVC `json:"pvc,omitempty"` + Upload *DataVolumeSourceUpload `json:"upload,omitempty"` + Blank *DataVolumeBlankImage `json:"blank,omitempty"` + Imageio *DataVolumeSourceImageIO `json:"imageio,omitempty"` + VDDK *DataVolumeSourceVDDK `json:"vddk,omitempty"` + Snapshot *DataVolumeSourceSnapshot `json:"snapshot,omitempty"` +} + +// DataVolumeSourcePVC provides the parameters to create a Data Volume from an existing PVC +type DataVolumeSourcePVC struct { + // The namespace of the source PVC + Namespace string `json:"namespace"` + // The name of the source PVC + Name string `json:"name"` +} + +// DataVolumeSourceSnapshot provides the parameters to create a Data Volume from an existing VolumeSnapshot +type DataVolumeSourceSnapshot struct { + // The namespace of the source VolumeSnapshot + Namespace string `json:"namespace"` + // The name of the source VolumeSnapshot + Name string `json:"name"` +} + +// DataVolumeBlankImage provides the parameters to create a new raw blank image for the PVC +type DataVolumeBlankImage struct{} + +// DataVolumeSourceUpload provides the parameters to create a Data Volume by uploading the source +type DataVolumeSourceUpload struct { +} + +// DataVolumeSourceS3 provides the parameters to create a Data Volume from an S3 source +type DataVolumeSourceS3 struct { + //URL is the url of the S3 source + URL string `json:"url"` + //SecretRef provides the secret reference needed to access the S3 source + SecretRef string `json:"secretRef,omitempty"` + // CertConfigMap is a configmap reference, containing a Certificate Authority(CA) public key, and a base64 encoded pem certificate + // +optional + CertConfigMap string `json:"certConfigMap,omitempty"` +} + +// DataVolumeSourceRegistry provides the parameters to create a Data Volume from an registry source +type DataVolumeSourceRegistry struct { + //URL is the url of the registry source (starting with the scheme: docker, oci-archive) + // +optional + URL *string `json:"url,omitempty"` + //ImageStream is the name of image stream for import + // +optional + ImageStream *string `json:"imageStream,omitempty"` + //PullMethod can be either "pod" (default import), or "node" (node docker cache based import) + // +optional + PullMethod *RegistryPullMethod `json:"pullMethod,omitempty"` + //SecretRef provides the secret reference needed to access the Registry source + // +optional + SecretRef *string `json:"secretRef,omitempty"` + //CertConfigMap provides a reference to the Registry certs + // +optional + CertConfigMap *string `json:"certConfigMap,omitempty"` +} + +const ( + // RegistrySchemeDocker is docker scheme prefix + RegistrySchemeDocker = "docker" + // RegistrySchemeOci is oci-archive scheme prefix + RegistrySchemeOci = "oci-archive" +) + +// 
RegistryPullMethod represents the registry import pull method +type RegistryPullMethod string + +const ( + // RegistryPullPod is the standard import + RegistryPullPod RegistryPullMethod = "pod" + // RegistryPullNode is the node docker cache based import + RegistryPullNode RegistryPullMethod = "node" +) + +// DataVolumeSourceHTTP can be either an http or https endpoint, with an optional basic auth user name and password, and an optional configmap containing additional CAs +type DataVolumeSourceHTTP struct { + // URL is the URL of the http(s) endpoint + URL string `json:"url"` + // SecretRef A Secret reference, the secret should contain accessKeyId (user name) base64 encoded, and secretKey (password) also base64 encoded + // +optional + SecretRef string `json:"secretRef,omitempty"` + // CertConfigMap is a configmap reference, containing a Certificate Authority(CA) public key, and a base64 encoded pem certificate + // +optional + CertConfigMap string `json:"certConfigMap,omitempty"` + // ExtraHeaders is a list of strings containing extra headers to include with HTTP transfer requests + // +optional + ExtraHeaders []string `json:"extraHeaders,omitempty"` + // SecretExtraHeaders is a list of Secret references, each containing an extra HTTP header that may include sensitive information + // +optional + SecretExtraHeaders []string `json:"secretExtraHeaders,omitempty"` +} + +// DataVolumeSourceImageIO provides the parameters to create a Data Volume from an imageio source +type DataVolumeSourceImageIO struct { + //URL is the URL of the ovirt-engine + URL string `json:"url"` + // DiskID provides id of a disk to be imported + DiskID string `json:"diskId"` + //SecretRef provides the secret reference needed to access the ovirt-engine + SecretRef string `json:"secretRef,omitempty"` + //CertConfigMap provides a reference to the CA cert + CertConfigMap string `json:"certConfigMap,omitempty"` +} + +// DataVolumeSourceVDDK provides the parameters to create a Data Volume from a Vmware source +type DataVolumeSourceVDDK struct { + // URL is the URL of the vCenter or ESXi host with the VM to migrate + URL string `json:"url,omitempty"` + // UUID is the UUID of the virtual machine that the backing file is attached to in vCenter/ESXi + UUID string `json:"uuid,omitempty"` + // BackingFile is the path to the virtual hard disk to migrate from vCenter/ESXi + BackingFile string `json:"backingFile,omitempty"` + // Thumbprint is the certificate thumbprint of the vCenter or ESXi host + Thumbprint string `json:"thumbprint,omitempty"` + // SecretRef provides a reference to a secret containing the username and password needed to access the vCenter or ESXi host + SecretRef string `json:"secretRef,omitempty"` + // InitImageURL is an optional URL to an image containing an extracted VDDK library, overrides v2v-vmware config map + InitImageURL string `json:"initImageURL,omitempty"` +} + +// DataVolumeSourceRef defines an indirect reference to the source of data for the DataVolume +type DataVolumeSourceRef struct { + // The kind of the source reference, currently only "DataSource" is supported + Kind string `json:"kind"` + // The namespace of the source reference, defaults to the DataVolume namespace + // +optional + Namespace *string `json:"namespace,omitempty"` + // The name of the source reference + Name string `json:"name"` +} + +const ( + // DataVolumeDataSource is DataSource source reference for DataVolume + DataVolumeDataSource = "DataSource" +) + +// DataVolumeStatus contains the current status of the DataVolume +type 
DataVolumeStatus struct { + // ClaimName is the name of the underlying PVC used by the DataVolume. + ClaimName string `json:"claimName,omitempty"` + //Phase is the current phase of the data volume + Phase DataVolumePhase `json:"phase,omitempty"` + Progress DataVolumeProgress `json:"progress,omitempty"` + // RestartCount is the number of times the pod populating the DataVolume has restarted + RestartCount int32 `json:"restartCount,omitempty"` + Conditions []DataVolumeCondition `json:"conditions,omitempty" optional:"true"` +} + +// DataVolumeList provides the needed parameters to do request a list of Data Volumes from the system +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type DataVolumeList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items provides a list of DataVolumes + Items []DataVolume `json:"items"` +} + +// DataVolumeCondition represents the state of a data volume condition. +type DataVolumeCondition struct { + Type DataVolumeConditionType `json:"type" description:"type of condition ie. Ready|Bound|Running."` + Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"` + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty"` + Reason string `json:"reason,omitempty" description:"reason for the condition's last transition"` + Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"` +} + +// DataVolumePhase is the current phase of the DataVolume +type DataVolumePhase string + +// DataVolumeProgress is the current progress of the DataVolume transfer operation. Value between 0 and 100 inclusive, N/A if not available +type DataVolumeProgress string + +// DataVolumeConditionType is the string representation of known condition types +type DataVolumeConditionType string + +const ( + // PhaseUnset represents a data volume with no current phase + PhaseUnset DataVolumePhase = "" + + // Pending represents a data volume with a current phase of Pending + Pending DataVolumePhase = "Pending" + // PVCBound represents a data volume with a current phase of PVCBound + PVCBound DataVolumePhase = "PVCBound" + + // ImportScheduled represents a data volume with a current phase of ImportScheduled + ImportScheduled DataVolumePhase = "ImportScheduled" + + // ImportInProgress represents a data volume with a current phase of ImportInProgress + ImportInProgress DataVolumePhase = "ImportInProgress" + + // CloneScheduled represents a data volume with a current phase of CloneScheduled + CloneScheduled DataVolumePhase = "CloneScheduled" + + // CloneInProgress represents a data volume with a current phase of CloneInProgress + CloneInProgress DataVolumePhase = "CloneInProgress" + + // SnapshotForSmartCloneInProgress represents a data volume with a current phase of SnapshotForSmartCloneInProgress + SnapshotForSmartCloneInProgress DataVolumePhase = "SnapshotForSmartCloneInProgress" + + // CloneFromSnapshotSourceInProgress represents a data volume with a current phase of CloneFromSnapshotSourceInProgress + CloneFromSnapshotSourceInProgress DataVolumePhase = "CloneFromSnapshotSourceInProgress" + + // SmartClonePVCInProgress represents a data volume with a current phase of SmartClonePVCInProgress + SmartClonePVCInProgress DataVolumePhase = "SmartClonePVCInProgress" + + // CSICloneInProgress represents a data volume with a current phase of CSICloneInProgress 
+ CSICloneInProgress DataVolumePhase = "CSICloneInProgress" + + // ExpansionInProgress is the state when a PVC is expanded + ExpansionInProgress DataVolumePhase = "ExpansionInProgress" + + // NamespaceTransferInProgress is the state when a PVC is transferred + NamespaceTransferInProgress DataVolumePhase = "NamespaceTransferInProgress" + + // UploadScheduled represents a data volume with a current phase of UploadScheduled + UploadScheduled DataVolumePhase = "UploadScheduled" + + // UploadReady represents a data volume with a current phase of UploadReady + UploadReady DataVolumePhase = "UploadReady" + + // WaitForFirstConsumer represents a data volume with a current phase of WaitForFirstConsumer + WaitForFirstConsumer DataVolumePhase = "WaitForFirstConsumer" + + // Succeeded represents a DataVolumePhase of Succeeded + Succeeded DataVolumePhase = "Succeeded" + // Failed represents a DataVolumePhase of Failed + Failed DataVolumePhase = "Failed" + // Unknown represents a DataVolumePhase of Unknown + Unknown DataVolumePhase = "Unknown" + // Paused represents a DataVolumePhase of Paused + Paused DataVolumePhase = "Paused" + + // DataVolumeReady is the condition that indicates if the data volume is ready to be consumed. + DataVolumeReady DataVolumeConditionType = "Ready" + // DataVolumeBound is the condition that indicates if the underlying PVC is bound or not. + DataVolumeBound DataVolumeConditionType = "Bound" + // DataVolumeRunning is the condition that indicates if the import/upload/clone container is running. + DataVolumeRunning DataVolumeConditionType = "Running" +) + +// DataVolumeCloneSourceSubresource is the subresource checked for permission to clone +const DataVolumeCloneSourceSubresource = "source" + +// this has to be here otherwise informer-gen doesn't recognize it +// see https://github.com/kubernetes/code-generator/issues/59 +// +genclient:nonNamespaced + +// StorageProfile provides a CDI specific recommendation for storage parameters +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:storageversion +// +kubebuilder:resource:scope=Cluster +type StorageProfile struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec StorageProfileSpec `json:"spec"` + Status StorageProfileStatus `json:"status,omitempty"` +} + +// StorageProfileSpec defines specification for StorageProfile +type StorageProfileSpec struct { + // CloneStrategy defines the preferred method for performing a CDI clone + CloneStrategy *CDICloneStrategy `json:"cloneStrategy,omitempty"` + // ClaimPropertySets is a provided set of properties applicable to PVC + ClaimPropertySets []ClaimPropertySet `json:"claimPropertySets,omitempty"` +} + +// StorageProfileStatus provides the most recently observed status of the StorageProfile +type StorageProfileStatus struct { + // The StorageClass name for which capabilities are defined + StorageClass *string `json:"storageClass,omitempty"` + // The Storage class provisioner plugin name + Provisioner *string `json:"provisioner,omitempty"` + // CloneStrategy defines the preferred method for performing a CDI clone + CloneStrategy *CDICloneStrategy `json:"cloneStrategy,omitempty"` + // ClaimPropertySets computed from the spec and detected in the system + ClaimPropertySets []ClaimPropertySet `json:"claimPropertySets,omitempty"` +} + +// ClaimPropertySet is a set of properties applicable to PVC +type ClaimPropertySet struct { + // AccessModes contains the desired 
access modes the volume should have. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + // +optional + AccessModes []corev1.PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` + // VolumeMode defines what type of volume is required by the claim. + // Value of Filesystem is implied when not included in claim spec. + // +optional + VolumeMode *corev1.PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"` +} + +// StorageProfileList provides the needed parameters to request a list of StorageProfile from the system +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type StorageProfileList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items provides a list of StorageProfile + Items []StorageProfile `json:"items"` +} + +// DataSource references an import/clone source for a DataVolume +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:storageversion +// +kubebuilder:resource:shortName=das,categories=all +type DataSource struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DataSourceSpec `json:"spec"` + Status DataSourceStatus `json:"status,omitempty"` +} + +// DataSourceSpec defines specification for DataSource +type DataSourceSpec struct { + // Source is the source of the data referenced by the DataSource + Source DataSourceSource `json:"source"` +} + +// DataSourceSource represents the source for our DataSource +type DataSourceSource struct { + // +optional + PVC *DataVolumeSourcePVC `json:"pvc,omitempty"` + // +optional + Snapshot *DataVolumeSourceSnapshot `json:"snapshot,omitempty"` +} + +// DataSourceStatus provides the most recently observed status of the DataSource +type DataSourceStatus struct { + // Source is the current source of the data referenced by the DataSource + Source DataSourceSource `json:"source,omitempty"` + Conditions []DataSourceCondition `json:"conditions,omitempty" optional:"true"` +} + +// DataSourceCondition represents the state of a data source condition +type DataSourceCondition struct { + Type DataSourceConditionType `json:"type" description:"type of condition ie. 
Ready"` + ConditionState `json:",inline"` +} + +// DataSourceConditionType is the string representation of known condition types +type DataSourceConditionType string + +const ( + // DataSourceReady is the condition that indicates if the data source is ready to be consumed + DataSourceReady DataSourceConditionType = "Ready" +) + +// ConditionState represents the state of a condition +type ConditionState struct { + Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"` + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty"` + Reason string `json:"reason,omitempty" description:"reason for the condition's last transition"` + Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"` +} + +// DataSourceList provides the needed parameters to do request a list of Data Sources from the system +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type DataSourceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + // Items provides a list of DataSources + Items []DataSource `json:"items"` +} + +// DataImportCron defines a cron job for recurring polling/importing disk images as PVCs into a golden image namespace +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:storageversion +// +kubebuilder:resource:shortName=dic;dics,categories=all +type DataImportCron struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DataImportCronSpec `json:"spec"` + Status DataImportCronStatus `json:"status,omitempty"` +} + +// DataImportCronSpec defines specification for DataImportCron +type DataImportCronSpec struct { + // Template specifies template for the DVs to be created + Template DataVolume `json:"template"` + // Schedule specifies in cron format when and how often to look for new imports + Schedule string `json:"schedule"` + // GarbageCollect specifies whether old PVCs should be cleaned up after a new PVC is imported. + // Options are currently "Outdated" and "Never", defaults to "Outdated". + // +optional + GarbageCollect *DataImportCronGarbageCollect `json:"garbageCollect,omitempty"` + // Number of import PVCs to keep when garbage collecting. Default is 3. + // +optional + ImportsToKeep *int32 `json:"importsToKeep,omitempty"` + // ManagedDataSource specifies the name of the corresponding DataSource this cron will manage. + // DataSource has to be in the same namespace. + ManagedDataSource string `json:"managedDataSource"` + // RetentionPolicy specifies whether the created DataVolumes and DataSources are retained when their DataImportCron is deleted. Default is RatainAll. 
+ // +optional + RetentionPolicy *DataImportCronRetentionPolicy `json:"retentionPolicy,omitempty"` +} + +// DataImportCronGarbageCollect represents the DataImportCron garbage collection mode +type DataImportCronGarbageCollect string + +const ( + // DataImportCronGarbageCollectNever specifies that garbage collection is disabled + DataImportCronGarbageCollectNever DataImportCronGarbageCollect = "Never" + // DataImportCronGarbageCollectOutdated specifies that old PVCs should be cleaned up after a new PVC is imported + DataImportCronGarbageCollectOutdated DataImportCronGarbageCollect = "Outdated" +) + +// DataImportCronRetentionPolicy represents the DataImportCron retention policy +type DataImportCronRetentionPolicy string + +const ( + // DataImportCronRetainNone specifies that the created DataVolumes and DataSources are deleted when their DataImportCron is deleted + DataImportCronRetainNone DataImportCronRetentionPolicy = "None" + // DataImportCronRetainAll specifies that the created DataVolumes and DataSources are retained when their DataImportCron is deleted + DataImportCronRetainAll DataImportCronRetentionPolicy = "All" +) + +// DataImportCronStatus provides the most recently observed status of the DataImportCron +type DataImportCronStatus struct { + // CurrentImports are the imports in progress. Currently only a single import is supported. + CurrentImports []ImportStatus `json:"currentImports,omitempty"` + // LastImportedPVC is the last imported PVC + LastImportedPVC *DataVolumeSourcePVC `json:"lastImportedPVC,omitempty"` + // LastExecutionTimestamp is the time of the last polling + LastExecutionTimestamp *metav1.Time `json:"lastExecutionTimestamp,omitempty"` + // LastImportTimestamp is the time of the last import + LastImportTimestamp *metav1.Time `json:"lastImportTimestamp,omitempty"` + Conditions []DataImportCronCondition `json:"conditions,omitempty" optional:"true"` +} + +// ImportStatus of a currently in progress import +type ImportStatus struct { + // DataVolumeName is the currently in progress import DataVolume + DataVolumeName string `json:"DataVolumeName"` + // Digest of the currently imported image + Digest string `json:"Digest"` +} + +// DataImportCronCondition represents the state of a data import cron condition +type DataImportCronCondition struct { + Type DataImportCronConditionType `json:"type" description:"type of condition ie. 
+
+// DataImportCronCondition represents the state of a data import cron condition
+type DataImportCronCondition struct {
+ Type DataImportCronConditionType `json:"type" description:"type of condition i.e. Progressing, UpToDate"`
+ ConditionState `json:",inline"`
+}
+
+// DataImportCronConditionType is the string representation of known condition types
+type DataImportCronConditionType string
+
+const (
+ // DataImportCronProgressing is the condition that indicates import is progressing
+ DataImportCronProgressing DataImportCronConditionType = "Progressing"
+
+ // DataImportCronUpToDate is the condition that indicates latest import is up to date
+ DataImportCronUpToDate DataImportCronConditionType = "UpToDate"
+)
+
+// DataImportCronList provides the needed parameters to request a list of DataImportCrons from the system
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type DataImportCronList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items provides a list of DataImportCrons
+ Items []DataImportCron `json:"items"`
+}
+
+// this has to be here otherwise informer-gen doesn't recognize it
+// see https://github.com/kubernetes/code-generator/issues/59
+// +genclient:nonNamespaced
+
+// CDI is the CDI Operator CRD
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:storageversion
+// +kubebuilder:resource:shortName=cdi;cdis,scope=Cluster
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase"
+type CDI struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec CDISpec `json:"spec"`
+ // +optional
+ Status CDIStatus `json:"status"`
+}
+
+// CertConfig contains the tunables for TLS certificates
+type CertConfig struct {
+ // The requested 'duration' (i.e. lifetime) of the Certificate.
+ Duration *metav1.Duration `json:"duration,omitempty"`
+
+ // The amount of time before the currently issued certificate's `notAfter`
+ // time that we will begin to attempt to renew the certificate.
+ RenewBefore *metav1.Duration `json:"renewBefore,omitempty"`
+}
+
+// CDICertConfig has the CertConfigs for CDI
+type CDICertConfig struct {
+ // CA configuration
+ // CA certs are kept in the CA bundle as long as they are valid
+ CA *CertConfig `json:"ca,omitempty"`
+
+ // Server configuration
+ // Certs are rotated and discarded
+ Server *CertConfig `json:"server,omitempty"`
+}
+
+// CDISpec defines our specification for the CDI installation
+type CDISpec struct {
+ // +kubebuilder:validation:Enum=Always;IfNotPresent;Never
+ // PullPolicy describes a policy for if/when to pull a container image
+ ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" valid:"required"`
+ // +kubebuilder:validation:Enum=RemoveWorkloads;BlockUninstallIfWorkloadsExist
+ // CDIUninstallStrategy defines the state to leave CDI on uninstall
+ UninstallStrategy *CDIUninstallStrategy `json:"uninstallStrategy,omitempty"`
+ // Rules on which nodes CDI infrastructure pods will be scheduled
+ Infra sdkapi.NodePlacement `json:"infra,omitempty"`
+ // Restrict on which nodes CDI workload pods will be scheduled
+ Workloads sdkapi.NodePlacement `json:"workload,omitempty"`
+ // Clone strategy override: should we use a host-assisted copy even if snapshots are available?
+ // +kubebuilder:validation:Enum="copy";"snapshot"
+ CloneStrategyOverride *CDICloneStrategy `json:"cloneStrategyOverride,omitempty"`
+ // CDIConfig at CDI level
+ Config *CDIConfigSpec `json:"config,omitempty"`
+ // certificate configuration
+ CertConfig *CDICertConfig `json:"certConfig,omitempty"`
+ // PriorityClass of the CDI control plane
+ PriorityClass *CDIPriorityClass `json:"priorityClass,omitempty"`
+}
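A minimal sketch of populating CDISpec, again assuming the cdiv1beta1 import alias; the chosen policies are illustrative, and the Infra/Workloads placement rules are left at their zero values:

package example

import (
	corev1 "k8s.io/api/core/v1"

	cdiv1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)

// exampleCDISpec blocks uninstall while workloads exist and forces
// host-assisted copies even where snapshot-based cloning would work.
func exampleCDISpec() cdiv1beta1.CDISpec {
	block := cdiv1beta1.CDIUninstallStrategyBlockUninstallIfWorkloadsExist
	hostAssisted := cdiv1beta1.CloneStrategyHostAssisted
	return cdiv1beta1.CDISpec{
		ImagePullPolicy:       corev1.PullIfNotPresent,
		UninstallStrategy:     &block,
		CloneStrategyOverride: &hostAssisted,
	}
}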
+
+// CDIPriorityClass defines the priority class of the CDI control plane.
+type CDIPriorityClass string
+
+// CDICloneStrategy defines the preferred method for performing a CDI clone
+type CDICloneStrategy string
+
+const (
+ // CloneStrategyHostAssisted specifies slower, host-assisted copy
+ CloneStrategyHostAssisted CDICloneStrategy = "copy"
+
+ // CloneStrategySnapshot specifies snapshot-based copying
+ CloneStrategySnapshot CDICloneStrategy = "snapshot"
+
+ // CloneStrategyCsiClone specifies CSI volume clone based cloning
+ CloneStrategyCsiClone CDICloneStrategy = "csi-clone"
+)
+
+// CDIUninstallStrategy defines the state to leave CDI on uninstall
+type CDIUninstallStrategy string
+
+const (
+ // CDIUninstallStrategyRemoveWorkloads specifies a clean uninstall
+ CDIUninstallStrategyRemoveWorkloads CDIUninstallStrategy = "RemoveWorkloads"
+
+ // CDIUninstallStrategyBlockUninstallIfWorkloadsExist blocks the uninstall while CDI workloads still exist
+ CDIUninstallStrategyBlockUninstallIfWorkloadsExist CDIUninstallStrategy = "BlockUninstallIfWorkloadsExist"
+)
+
+// CDIPhase is the current phase of the CDI deployment
+type CDIPhase string
+
+// CDIStatus defines the status of the installation
+type CDIStatus struct {
+ sdkapi.Status `json:",inline"`
+}
+
+// CDIList provides the needed parameters to request a list of CDIs from the system
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type CDIList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items provides a list of CDIs
+ Items []CDI `json:"items"`
+}
+
+// this has to be here otherwise informer-gen doesn't recognize it
+// see https://github.com/kubernetes/code-generator/issues/59
+// +genclient:nonNamespaced
+
+// CDIConfig provides a user configuration for CDI
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:storageversion
+// +kubebuilder:resource:scope=Cluster
+type CDIConfig struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec CDIConfigSpec `json:"spec"`
+ Status CDIConfigStatus `json:"status,omitempty"`
+}
+
+// Percent is a string that can only be a value between [0,1]
+// (Note: we actually rely on reconcile to reject invalid values)
+// +kubebuilder:validation:Pattern=`^(0(?:\.\d{1,3})?|1)$`
+type Percent string
+
+// FilesystemOverhead defines the reserved size for PVCs with VolumeMode: Filesystem
+type FilesystemOverhead struct {
+ // Global is how much space of a Filesystem volume should be reserved for overhead. This value is used unless overridden by a more specific value (per storageClass)
+ Global Percent `json:"global,omitempty"`
+ // StorageClass specifies how much space of a Filesystem volume should be reserved for safety. The keys are the storageClass and the values are the overhead. This value overrides the global value
+ StorageClass map[string]Percent `json:"storageClass,omitempty"`
+}
+
+// CDIConfigSpec defines specification for user configuration
+type CDIConfigSpec struct {
+ // Override the URL used when uploading to a DataVolume
+ UploadProxyURLOverride *string `json:"uploadProxyURLOverride,omitempty"`
+ // ImportProxy contains importer pod proxy configuration.
+ // +optional
+ ImportProxy *ImportProxy `json:"importProxy,omitempty"`
+ // Override the storage class to be used for scratch space during transfer operations. The scratch space storage class is determined in the following order: 1. the value of scratchSpaceStorageClass; 2. if that doesn't exist, the default storage class; 3. if there is no default storage class, the storage class of the DataVolume; 4. if no storage class is specified, no storage class is used for scratch space
+ ScratchSpaceStorageClass *string `json:"scratchSpaceStorageClass,omitempty"`
+ // ResourceRequirements describes the compute resource requirements.
+ PodResourceRequirements *corev1.ResourceRequirements `json:"podResourceRequirements,omitempty"`
+ // FeatureGates are a list of specific enabled feature gates
+ FeatureGates []string `json:"featureGates,omitempty"`
+ // FilesystemOverhead describes the space reserved for overhead when using Filesystem volumes. A value is between 0 and 1, if not defined it is 0.055 (5.5% overhead)
+ FilesystemOverhead *FilesystemOverhead `json:"filesystemOverhead,omitempty"`
+ // Preallocation controls whether storage for DataVolumes should be allocated in advance.
+ Preallocation *bool `json:"preallocation,omitempty"`
+ // InsecureRegistries is a list of TLS disabled registries
+ InsecureRegistries []string `json:"insecureRegistries,omitempty"`
+ // DataVolumeTTLSeconds is the time in seconds after DataVolume completion before it can be garbage collected. The default is 0 sec. To disable GC use -1.
+ // +optional
+ DataVolumeTTLSeconds *int32 `json:"dataVolumeTTLSeconds,omitempty"`
+ // TLSSecurityProfile is used by operators to apply cluster-wide TLS security settings to operands.
+ TLSSecurityProfile *ocpconfigv1.TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"`
+}
+
+// CDIConfigStatus provides the most recently observed status of the CDI Config resource
+type CDIConfigStatus struct {
+ // The calculated upload proxy URL
+ UploadProxyURL *string `json:"uploadProxyURL,omitempty"`
+ // ImportProxy contains importer pod proxy configuration.
+ // +optional
+ ImportProxy *ImportProxy `json:"importProxy,omitempty"`
+ // The calculated storage class to be used for scratch space
+ ScratchSpaceStorageClass string `json:"scratchSpaceStorageClass,omitempty"`
+ // ResourceRequirements describes the compute resource requirements.
+ DefaultPodResourceRequirements *corev1.ResourceRequirements `json:"defaultPodResourceRequirements,omitempty"`
+ // FilesystemOverhead describes the space reserved for overhead when using Filesystem volumes. A percentage value is between 0 and 1
+ FilesystemOverhead *FilesystemOverhead `json:"filesystemOverhead,omitempty"`
+ // Preallocation controls whether storage for DataVolumes should be allocated in advance.
+ Preallocation bool `json:"preallocation,omitempty"`
+}
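A short sketch tying Percent, FilesystemOverhead, and ImportProxy together in a CDIConfigSpec; the storage-class name and proxy endpoints are hypothetical:

package example

import (
	cdiv1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)

// exampleCDIConfigSpec reserves 5.5% overhead globally and 8% for one class,
// and routes importer traffic through an internal proxy.
func exampleCDIConfigSpec() cdiv1beta1.CDIConfigSpec {
	httpProxy := "http://proxy.internal:3128" // hypothetical proxy endpoint
	noProxy := ".cluster.local,10.0.0.0/8"
	return cdiv1beta1.CDIConfigSpec{
		FilesystemOverhead: &cdiv1beta1.FilesystemOverhead{
			Global:       "0.055",
			StorageClass: map[string]cdiv1beta1.Percent{"slow-nfs": "0.08"},
		},
		ImportProxy: &cdiv1beta1.ImportProxy{
			HTTPProxy: &httpProxy,
			NoProxy:   &noProxy,
		},
	}
}

Both overhead values satisfy the kubebuilder pattern above (at most three decimal places).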
+
+// CDIConfigList provides the needed parameters to request a list of CDIConfigs from the system
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type CDIConfigList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items provides a list of CDIConfigs
+ Items []CDIConfig `json:"items"`
+}
+
+// ImportProxy provides the information on how to configure the importer pod proxy.
+type ImportProxy struct {
+ // HTTPProxy is the URL http://<username>:<pswd>@<ip>:<port> of the import proxy for HTTP requests. Empty means unset and will not result in the import pod env var.
+ // +optional
+ HTTPProxy *string `json:"HTTPProxy,omitempty"`
+ // HTTPSProxy is the URL https://<username>:<pswd>@<ip>:<port> of the import proxy for HTTPS requests. Empty means unset and will not result in the import pod env var.
+ // +optional
+ HTTPSProxy *string `json:"HTTPSProxy,omitempty"`
+ // NoProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. Empty means unset and will not result in the import pod env var.
+ // +optional
+ NoProxy *string `json:"noProxy,omitempty"`
+ // TrustedCAProxy is the name of a ConfigMap in the cdi namespace that contains a user-provided trusted certificate authority (CA) bundle.
+ // The TrustedCAProxy ConfigMap is consumed by the DataImportCron controller for creating cronjobs, and by the import controller referencing a copy of the ConfigMap in the import namespace.
+ // Here is an example of the ConfigMap (in yaml):
+ //
+ // apiVersion: v1
+ // kind: ConfigMap
+ // metadata:
+ // name: my-ca-proxy-cm
+ // namespace: cdi
+ // data:
+ // ca.pem: |
+ // -----BEGIN CERTIFICATE-----
+ // ... ...
+ // -----END CERTIFICATE-----
+ // +optional
+ TrustedCAProxy *string `json:"trustedCAProxy,omitempty"`
+}
diff --git a/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_swagger_generated.go b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_swagger_generated.go
new file mode 100644
index 000000000..9736d946f
--- /dev/null
+++ b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_swagger_generated.go
@@ -0,0 +1,408 @@
+// Code generated by swagger-doc. DO NOT EDIT.
+ +package v1beta1 + +func (DataVolume) SwaggerDoc() map[string]string { + return map[string]string{ + "": "DataVolume is an abstraction on top of PersistentVolumeClaims to allow easy population of those PersistentVolumeClaims with relation to VirtualMachines\n+genclient\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:object:root=true\n+kubebuilder:storageversion\n+kubebuilder:resource:shortName=dv;dvs,categories=all\n+kubebuilder:printcolumn:name=\"Phase\",type=\"string\",JSONPath=\".status.phase\",description=\"The phase the data volume is in\"\n+kubebuilder:printcolumn:name=\"Progress\",type=\"string\",JSONPath=\".status.progress\",description=\"Transfer progress in percentage if known, N/A otherwise\"\n+kubebuilder:printcolumn:name=\"Restarts\",type=\"integer\",JSONPath=\".status.restartCount\",description=\"The number of times the transfer has been restarted.\"\n+kubebuilder:printcolumn:name=\"Age\",type=\"date\",JSONPath=\".metadata.creationTimestamp\"", + } +} + +func (DataVolumeSpec) SwaggerDoc() map[string]string { + return map[string]string{ + "": "DataVolumeSpec defines the DataVolume type specification", + "source": "Source is the src of the data for the requested DataVolume\n+optional", + "sourceRef": "SourceRef is an indirect reference to the source of data for the requested DataVolume\n+optional", + "pvc": "PVC is the PVC specification", + "storage": "Storage is the requested storage specification", + "priorityClassName": "PriorityClassName for Importer, Cloner and Uploader pod", + "contentType": "DataVolumeContentType options: \"kubevirt\", \"archive\"\n+kubebuilder:validation:Enum=\"kubevirt\";\"archive\"", + "checkpoints": "Checkpoints is a list of DataVolumeCheckpoints, representing stages in a multistage import.", + "finalCheckpoint": "FinalCheckpoint indicates whether the current DataVolumeCheckpoint is the final checkpoint.", + "preallocation": "Preallocation controls whether storage for DataVolumes should be allocated in advance.", + } +} + +func (StorageSpec) SwaggerDoc() map[string]string { + return map[string]string{ + "": "StorageSpec defines the Storage type specification", + "accessModes": "AccessModes contains the desired access modes the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1\n+optional", + "selector": "A label query over volumes to consider for binding.\n+optional", + "resources": "Resources represents the minimum resources the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources\n+optional", + "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.\n+optional", + "storageClassName": "Name of the StorageClass required by the claim.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1\n+optional", + "volumeMode": "volumeMode defines what type of volume is required by the claim.\nValue of Filesystem is implied when not included in claim spec.\n+optional", + "dataSource": "This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. 
If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.\nIf the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.\n+optional", + "dataSourceRef": "Specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner.\nThis field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty.\nThere are two important differences between DataSource and DataSourceRef:\n* While DataSource only allows two specific types of objects, DataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects.\n* While DataSource ignores disallowed values (dropping them), DataSourceRef preserves all values, and generates an error if a disallowed value is specified.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.\n+optional", + } +} + +func (DataVolumeCheckpoint) SwaggerDoc() map[string]string { + return map[string]string{ + "": "DataVolumeCheckpoint defines a stage in a warm migration.", + "previous": "Previous is the identifier of the snapshot from the previous checkpoint.", + "current": "Current is the identifier of the snapshot created for this checkpoint.", + } +} + +func (DataVolumeSource) SwaggerDoc() map[string]string { + return map[string]string{ + "": "DataVolumeSource represents the source for our Data Volume, this can be HTTP, Imageio, S3, Registry or an existing PVC", + } +} + +func (DataVolumeSourcePVC) SwaggerDoc() map[string]string { + return map[string]string{ + "": "DataVolumeSourcePVC provides the parameters to create a Data Volume from an existing PVC", + "namespace": "The namespace of the source PVC", + "name": "The name of the source PVC", + } +} + +func (DataVolumeSourceSnapshot) SwaggerDoc() map[string]string { + return map[string]string{ + "": "DataVolumeSourceSnapshot provides the parameters to create a Data Volume from an existing VolumeSnapshot", + "namespace": "The namespace of the source VolumeSnapshot", + "name": "The name of the source VolumeSnapshot", + } +} + +func (DataVolumeBlankImage) SwaggerDoc() map[string]string { + return map[string]string{ + "": "DataVolumeBlankImage provides the parameters to create a new raw blank image for the PVC", + } +} + +func (DataVolumeSourceUpload) SwaggerDoc() map[string]string { + return map[string]string{ + "": "DataVolumeSourceUpload provides the parameters to create a Data Volume by uploading the source", + } +} + +func (DataVolumeSourceS3) SwaggerDoc() map[string]string { + return map[string]string{ + "": "DataVolumeSourceS3 provides the parameters to create a Data Volume from an S3 source", + "url": "URL is the url of the S3 source", + "secretRef": "SecretRef provides the secret reference needed to access the S3 source", + "certConfigMap": "CertConfigMap is a configmap reference, containing a Certificate Authority(CA) public key, and a base64 encoded pem certificate\n+optional", + } 
+}
+
+func (DataVolumeSourceRegistry) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "DataVolumeSourceRegistry provides the parameters to create a Data Volume from a registry source",
+ "url": "URL is the url of the registry source (starting with the scheme: docker, oci-archive)\n+optional",
+ "imageStream": "ImageStream is the name of image stream for import\n+optional",
+ "pullMethod": "PullMethod can be either \"pod\" (default import), or \"node\" (node docker cache based import)\n+optional",
+ "secretRef": "SecretRef provides the secret reference needed to access the Registry source\n+optional",
+ "certConfigMap": "CertConfigMap provides a reference to the Registry certs\n+optional",
+ }
+}
+
+func (DataVolumeSourceHTTP) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "DataVolumeSourceHTTP can be either an http or https endpoint, with an optional basic auth user name and password, and an optional configmap containing additional CAs",
+ "url": "URL is the URL of the http(s) endpoint",
+ "secretRef": "SecretRef A Secret reference, the secret should contain accessKeyId (user name) base64 encoded, and secretKey (password) also base64 encoded\n+optional",
+ "certConfigMap": "CertConfigMap is a configmap reference, containing a Certificate Authority(CA) public key, and a base64 encoded pem certificate\n+optional",
+ "extraHeaders": "ExtraHeaders is a list of strings containing extra headers to include with HTTP transfer requests\n+optional",
+ "secretExtraHeaders": "SecretExtraHeaders is a list of Secret references, each containing an extra HTTP header that may include sensitive information\n+optional",
+ }
+}
+
+func (DataVolumeSourceImageIO) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "DataVolumeSourceImageIO provides the parameters to create a Data Volume from an imageio source",
+ "url": "URL is the URL of the ovirt-engine",
+ "diskId": "DiskID provides id of a disk to be imported",
+ "secretRef": "SecretRef provides the secret reference needed to access the ovirt-engine",
+ "certConfigMap": "CertConfigMap provides a reference to the CA cert",
+ }
+}
+
+func (DataVolumeSourceVDDK) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "DataVolumeSourceVDDK provides the parameters to create a Data Volume from a VMware source",
+ "url": "URL is the URL of the vCenter or ESXi host with the VM to migrate",
+ "uuid": "UUID is the UUID of the virtual machine that the backing file is attached to in vCenter/ESXi",
+ "backingFile": "BackingFile is the path to the virtual hard disk to migrate from vCenter/ESXi",
+ "thumbprint": "Thumbprint is the certificate thumbprint of the vCenter or ESXi host",
+ "secretRef": "SecretRef provides a reference to a secret containing the username and password needed to access the vCenter or ESXi host",
+ "initImageURL": "InitImageURL is an optional URL to an image containing an extracted VDDK library, overrides v2v-vmware config map",
+ }
+}
+
+func (DataVolumeSourceRef) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "DataVolumeSourceRef defines an indirect reference to the source of data for the DataVolume",
+ "kind": "The kind of the source reference, currently only \"DataSource\" is supported",
+ "namespace": "The namespace of the source reference, defaults to the DataVolume namespace\n+optional",
+ "name": "The name of the source reference",
+ }
+}
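These generated SwaggerDoc maps are how tooling surfaces field documentation at runtime: the empty-string key holds the type-level description and each field's JSON name keys its doc string. A small usage sketch (the function name is ours):

package example

import (
	"fmt"

	cdiv1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)

// printRegistryDocs looks up the generated docs for DataVolumeSourceRegistry.
func printRegistryDocs() {
	docs := cdiv1beta1.DataVolumeSourceRegistry{}.SwaggerDoc()
	fmt.Println(docs[""])    // type-level description
	fmt.Println(docs["url"]) // doc string for the "url" field
}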
+
+func (DataVolumeStatus) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "DataVolumeStatus contains the current status of the DataVolume",
+ "claimName": "ClaimName is the name of the underlying PVC used by the DataVolume.",
+ "phase": "Phase is the current phase of the data volume",
+ "restartCount": "RestartCount is the number of times the pod populating the DataVolume has restarted",
+ }
+}
+
+func (DataVolumeList) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "DataVolumeList provides the needed parameters to request a list of Data Volumes from the system\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
+ "items": "Items provides a list of DataVolumes",
+ }
+}
+
+func (DataVolumeCondition) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "DataVolumeCondition represents the state of a data volume condition.",
+ }
+}
+
+func (StorageProfile) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "StorageProfile provides a CDI specific recommendation for storage parameters\n+genclient\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:object:root=true\n+kubebuilder:storageversion\n+kubebuilder:resource:scope=Cluster",
+ }
+}
+
+func (StorageProfileSpec) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "StorageProfileSpec defines specification for StorageProfile",
+ "cloneStrategy": "CloneStrategy defines the preferred method for performing a CDI clone",
+ "claimPropertySets": "ClaimPropertySets is a provided set of properties applicable to PVC",
+ }
+}
+
+func (StorageProfileStatus) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "StorageProfileStatus provides the most recently observed status of the StorageProfile",
+ "storageClass": "The StorageClass name for which capabilities are defined",
+ "provisioner": "The Storage class provisioner plugin name",
+ "cloneStrategy": "CloneStrategy defines the preferred method for performing a CDI clone",
+ "claimPropertySets": "ClaimPropertySets computed from the spec and detected in the system",
+ }
+}
+
+func (ClaimPropertySet) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "ClaimPropertySet is a set of properties applicable to PVC",
+ "accessModes": "AccessModes contains the desired access modes the volume should have.\nMore info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1\n+optional",
+ "volumeMode": "VolumeMode defines what type of volume is required by the claim.\nValue of Filesystem is implied when not included in claim spec.\n+optional",
+ }
+}
+
+func (StorageProfileList) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "StorageProfileList provides the needed parameters to request a list of StorageProfile from the system\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
+ "items": "Items provides a list of StorageProfile",
+ }
+}
+
+func (DataSource) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "DataSource references an import/clone source for a DataVolume\n+genclient\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:object:root=true\n+kubebuilder:storageversion\n+kubebuilder:resource:shortName=das,categories=all",
+ }
+}
+
+func (DataSourceSpec) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "DataSourceSpec defines specification for DataSource",
+ "source": "Source is the source of the data referenced by the DataSource",
+ }
+}
"DataSourceSource represents the source for our DataSource", + "pvc": "+optional", + "snapshot": "+optional", + } +} + +func (DataSourceStatus) SwaggerDoc() map[string]string { + return map[string]string{ + "": "DataSourceStatus provides the most recently observed status of the DataSource", + "source": "Source is the current source of the data referenced by the DataSource", + } +} + +func (DataSourceCondition) SwaggerDoc() map[string]string { + return map[string]string{ + "": "DataSourceCondition represents the state of a data source condition", + } +} + +func (ConditionState) SwaggerDoc() map[string]string { + return map[string]string{ + "": "ConditionState represents the state of a condition", + } +} + +func (DataSourceList) SwaggerDoc() map[string]string { + return map[string]string{ + "": "DataSourceList provides the needed parameters to do request a list of Data Sources from the system\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object", + "items": "Items provides a list of DataSources", + } +} + +func (DataImportCron) SwaggerDoc() map[string]string { + return map[string]string{ + "": "DataImportCron defines a cron job for recurring polling/importing disk images as PVCs into a golden image namespace\n+genclient\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:object:root=true\n+kubebuilder:storageversion\n+kubebuilder:resource:shortName=dic;dics,categories=all", + } +} + +func (DataImportCronSpec) SwaggerDoc() map[string]string { + return map[string]string{ + "": "DataImportCronSpec defines specification for DataImportCron", + "template": "Template specifies template for the DVs to be created", + "schedule": "Schedule specifies in cron format when and how often to look for new imports", + "garbageCollect": "GarbageCollect specifies whether old PVCs should be cleaned up after a new PVC is imported.\nOptions are currently \"Outdated\" and \"Never\", defaults to \"Outdated\".\n+optional", + "importsToKeep": "Number of import PVCs to keep when garbage collecting. Default is 3.\n+optional", + "managedDataSource": "ManagedDataSource specifies the name of the corresponding DataSource this cron will manage.\nDataSource has to be in the same namespace.", + "retentionPolicy": "RetentionPolicy specifies whether the created DataVolumes and DataSources are retained when their DataImportCron is deleted. Default is RatainAll.\n+optional", + } +} + +func (DataImportCronStatus) SwaggerDoc() map[string]string { + return map[string]string{ + "": "DataImportCronStatus provides the most recently observed status of the DataImportCron", + "currentImports": "CurrentImports are the imports in progress. 
+
+func (DataImportCronStatus) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "DataImportCronStatus provides the most recently observed status of the DataImportCron",
+ "currentImports": "CurrentImports are the imports in progress. Currently only a single import is supported.",
+ "lastImportedPVC": "LastImportedPVC is the last imported PVC",
+ "lastExecutionTimestamp": "LastExecutionTimestamp is the time of the last polling",
+ "lastImportTimestamp": "LastImportTimestamp is the time of the last import",
+ }
+}
+
+func (ImportStatus) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "ImportStatus describes a currently in-progress import",
+ "DataVolumeName": "DataVolumeName is the name of the DataVolume for the currently in-progress import",
+ "Digest": "Digest of the currently imported image",
+ }
+}
+
+func (DataImportCronCondition) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "DataImportCronCondition represents the state of a data import cron condition",
+ }
+}
+
+func (DataImportCronList) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "DataImportCronList provides the needed parameters to request a list of DataImportCrons from the system\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
+ "items": "Items provides a list of DataImportCrons",
+ }
+}
+
+func (CDI) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "CDI is the CDI Operator CRD\n+genclient\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:object:root=true\n+kubebuilder:storageversion\n+kubebuilder:resource:shortName=cdi;cdis,scope=Cluster\n+kubebuilder:printcolumn:name=\"Age\",type=\"date\",JSONPath=\".metadata.creationTimestamp\"\n+kubebuilder:printcolumn:name=\"Phase\",type=\"string\",JSONPath=\".status.phase\"",
+ "status": "+optional",
+ }
+}
+
+func (CertConfig) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "CertConfig contains the tunables for TLS certificates",
+ "duration": "The requested 'duration' (i.e. lifetime) of the Certificate.",
+ "renewBefore": "The amount of time before the currently issued certificate's `notAfter`\ntime that we will begin to attempt to renew the certificate.",
+ }
+}
+
+func (CDICertConfig) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "CDICertConfig has the CertConfigs for CDI",
+ "ca": "CA configuration\nCA certs are kept in the CA bundle as long as they are valid",
+ "server": "Server configuration\nCerts are rotated and discarded",
+ }
+}
+
+func (CDISpec) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "CDISpec defines our specification for the CDI installation",
+ "imagePullPolicy": "+kubebuilder:validation:Enum=Always;IfNotPresent;Never\nPullPolicy describes a policy for if/when to pull a container image",
+ "uninstallStrategy": "+kubebuilder:validation:Enum=RemoveWorkloads;BlockUninstallIfWorkloadsExist\nCDIUninstallStrategy defines the state to leave CDI on uninstall",
+ "infra": "Rules on which nodes CDI infrastructure pods will be scheduled",
+ "workload": "Restrict on which nodes CDI workload pods will be scheduled",
+ "cloneStrategyOverride": "Clone strategy override: should we use a host-assisted copy even if snapshots are available?\n+kubebuilder:validation:Enum=\"copy\";\"snapshot\"",
+ "config": "CDIConfig at CDI level",
+ "certConfig": "certificate configuration",
+ "priorityClass": "PriorityClass of the CDI control plane",
+ }
+}
+
+func (CDIStatus) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "CDIStatus defines the status of the installation",
+ }
+}
+
+func (CDIList) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "CDIList provides the needed parameters to request a list of CDIs from the system\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
+ "items": "Items provides a list of CDIs",
+ }
+}
+
+func (CDIConfig) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "CDIConfig provides a user configuration for CDI\n+genclient\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:object:root=true\n+kubebuilder:storageversion\n+kubebuilder:resource:scope=Cluster",
+ }
+}
+
+func (FilesystemOverhead) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "FilesystemOverhead defines the reserved size for PVCs with VolumeMode: Filesystem",
+ "global": "Global is how much space of a Filesystem volume should be reserved for overhead. This value is used unless overridden by a more specific value (per storageClass)",
+ "storageClass": "StorageClass specifies how much space of a Filesystem volume should be reserved for safety. The keys are the storageClass and the values are the overhead. This value overrides the global value",
+ }
+}
+
+func (CDIConfigSpec) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "CDIConfigSpec defines specification for user configuration",
+ "uploadProxyURLOverride": "Override the URL used when uploading to a DataVolume",
+ "importProxy": "ImportProxy contains importer pod proxy configuration.\n+optional",
+ "scratchSpaceStorageClass": "Override the storage class to be used for scratch space during transfer operations. The scratch space storage class is determined in the following order: 1. the value of scratchSpaceStorageClass; 2. if that doesn't exist, the default storage class; 3. if there is no default storage class, the storage class of the DataVolume; 4. if no storage class is specified, no storage class is used for scratch space",
+ "podResourceRequirements": "ResourceRequirements describes the compute resource requirements.",
+ "featureGates": "FeatureGates are a list of specific enabled feature gates",
+ "filesystemOverhead": "FilesystemOverhead describes the space reserved for overhead when using Filesystem volumes. A value is between 0 and 1, if not defined it is 0.055 (5.5% overhead)",
+ "preallocation": "Preallocation controls whether storage for DataVolumes should be allocated in advance.",
+ "insecureRegistries": "InsecureRegistries is a list of TLS disabled registries",
+ "dataVolumeTTLSeconds": "DataVolumeTTLSeconds is the time in seconds after DataVolume completion before it can be garbage collected. The default is 0 sec. To disable GC use -1.\n+optional",
+ "tlsSecurityProfile": "TLSSecurityProfile is used by operators to apply cluster-wide TLS security settings to operands.",
+ }
+}
+
+func (CDIConfigStatus) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "CDIConfigStatus provides the most recently observed status of the CDI Config resource",
+ "uploadProxyURL": "The calculated upload proxy URL",
+ "importProxy": "ImportProxy contains importer pod proxy configuration.\n+optional",
+ "scratchSpaceStorageClass": "The calculated storage class to be used for scratch space",
+ "defaultPodResourceRequirements": "ResourceRequirements describes the compute resource requirements.",
+ "filesystemOverhead": "FilesystemOverhead describes the space reserved for overhead when using Filesystem volumes. A percentage value is between 0 and 1",
+ "preallocation": "Preallocation controls whether storage for DataVolumes should be allocated in advance.",
+ }
+}
+
+func (CDIConfigList) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "CDIConfigList provides the needed parameters to request a list of CDIConfigs from the system\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object",
+ "items": "Items provides a list of CDIConfigs",
+ }
+}
+
+func (ImportProxy) SwaggerDoc() map[string]string {
+ return map[string]string{
+ "": "ImportProxy provides the information on how to configure the importer pod proxy.",
+ "HTTPProxy": "HTTPProxy is the URL http://<username>:<pswd>@<ip>:<port> of the import proxy for HTTP requests. Empty means unset and will not result in the import pod env var.\n+optional",
+ "HTTPSProxy": "HTTPSProxy is the URL https://<username>:<pswd>@<ip>:<port> of the import proxy for HTTPS requests. Empty means unset and will not result in the import pod env var.\n+optional",
+ "noProxy": "NoProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. Empty means unset and will not result in the import pod env var.\n+optional",
+ "trustedCAProxy": "TrustedCAProxy is the name of a ConfigMap in the cdi namespace that contains a user-provided trusted certificate authority (CA) bundle.\nThe TrustedCAProxy ConfigMap is consumed by the DataImportCron controller for creating cronjobs, and by the import controller referencing a copy of the ConfigMap in the import namespace.\nHere is an example of the ConfigMap (in yaml):\n\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: my-ca-proxy-cm\n namespace: cdi\ndata:\n ca.pem: |",
+ }
+}
diff --git a/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_transfer.go b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_transfer.go
new file mode 100644
index 000000000..d87e5ffb2
--- /dev/null
+++ b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/types_transfer.go
@@ -0,0 +1,134 @@
+/*
+Copyright 2021 The CDI Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// this has to be here otherwise informer-gen doesn't recognize it
+// see https://github.com/kubernetes/code-generator/issues/59
+// +genclient:nonNamespaced
+
+// ObjectTransfer is the cluster-scoped object transfer resource
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:storageversion
+// +kubebuilder:resource:shortName=ot;ots,scope=Cluster
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="The phase of the ObjectTransfer"
+// +kubebuilder:subresource:status
+type ObjectTransfer struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec ObjectTransferSpec `json:"spec"`
+
+ // +optional
+ Status ObjectTransferStatus `json:"status"`
+}
+
+// TransferSource is the source of an ObjectTransfer
+type TransferSource struct {
+ // +optional
+ APIVersion string `json:"apiVersion,omitempty"`
+
+ Kind string `json:"kind"`
+
+ Namespace string `json:"namespace"`
+
+ Name string `json:"name"`
+
+ RequiredAnnotations map[string]string `json:"requiredAnnotations,omitempty"`
+}
+
+// TransferTarget is the target of an ObjectTransfer
+type TransferTarget struct {
+ Namespace *string `json:"namespace,omitempty"`
+
+ Name *string `json:"name,omitempty"`
+}
+
+// ObjectTransferSpec specifies the source/target of the transfer
+type ObjectTransferSpec struct {
+ Source TransferSource `json:"source"`
+
+ Target TransferTarget `json:"target"`
+
+ ParentName *string `json:"parentName,omitempty"`
+}
+
+// ObjectTransferPhase is the phase of the ObjectTransfer
+type ObjectTransferPhase string
+
+const (
+ // ObjectTransferEmpty is the empty transfer phase
+ ObjectTransferEmpty ObjectTransferPhase = ""
+
+ // ObjectTransferPending is the pending transfer phase
+ ObjectTransferPending ObjectTransferPhase = "Pending"
+
+ // ObjectTransferRunning is the running transfer phase
+ ObjectTransferRunning ObjectTransferPhase = "Running"
+
+ // ObjectTransferComplete is the complete transfer phase
+ ObjectTransferComplete ObjectTransferPhase = "Complete"
+
+ // ObjectTransferError is the (terminal) error transfer phase
+ ObjectTransferError ObjectTransferPhase = "Error"
+)
+
+// ObjectTransferConditionType is the type of ObjectTransferCondition
+type ObjectTransferConditionType string
+
+const (
+ // ObjectTransferConditionComplete is the "complete" condition
+ ObjectTransferConditionComplete ObjectTransferConditionType = "Complete"
+)
+
+// ObjectTransferCondition contains condition data
+type ObjectTransferCondition struct {
+ Type ObjectTransferConditionType `json:"type"`
+ Status corev1.ConditionStatus `json:"status"`
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+ LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty"`
+ Reason string `json:"reason,omitempty"`
+ Message string `json:"message,omitempty"`
+}
+
+// ObjectTransferStatus is the status of the ObjectTransfer
+type ObjectTransferStatus struct {
+ // Data is a place for arbitrary intermediary state.
+ Data map[string]string `json:"data,omitempty"`
+
+ // Phase is the current phase of the transfer
+ Phase ObjectTransferPhase `json:"phase,omitempty"`
+
+ Conditions []ObjectTransferCondition `json:"conditions,omitempty"`
+}
+
+// ObjectTransferList provides the needed parameters to request a list of ObjectTransfers from the system
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type ObjectTransferList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+
+ // Items provides a list of ObjectTransfers
+ Items []ObjectTransfer `json:"items"`
+}
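A hedged sketch of requesting a namespace-to-namespace transfer with these types; the source kind and all names are illustrative (whether a given kind is transferable is decided by the CDI controller, not by this API package):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	cdiv1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)

// examplePVCTransfer moves a claim from team-a to team-b, keeping its name
// (Target.Name is nil, so the original name is reused).
func examplePVCTransfer() cdiv1beta1.ObjectTransfer {
	targetNS := "team-b"
	return cdiv1beta1.ObjectTransfer{
		ObjectMeta: metav1.ObjectMeta{Name: "move-fedora-pvc"},
		Spec: cdiv1beta1.ObjectTransferSpec{
			Source: cdiv1beta1.TransferSource{
				Kind:      "PersistentVolumeClaim", // illustrative kind
				Namespace: "team-a",
				Name:      "fedora",
			},
			Target: cdiv1beta1.TransferTarget{Namespace: &targetNS},
		},
	}
}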
diff --git a/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/utils.go b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/utils.go
new file mode 100644
index 000000000..b935d7f33
--- /dev/null
+++ b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/utils.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2020 The CDI Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// IsPopulated indicates if the persistent volume passed in has been fully populated. It follows this logic:
+// 1. If the PVC is not owned by a DataVolume, return true; we assume someone else has properly populated the image
+// 2. If the PVC is owned by a DataVolume, look up the DV and check the phase; if the phase is Succeeded, return true
+// 3. If the PVC is owned by a DataVolume, look up the DV and check the phase; if the phase is not Succeeded, return false
+func IsPopulated(pvc *corev1.PersistentVolumeClaim, getDvFunc func(name, namespace string) (*DataVolume, error)) (bool, error) {
+ pvcOwner := metav1.GetControllerOf(pvc)
+ if pvcOwner != nil && pvcOwner.Kind == "DataVolume" {
+ // Find the data volume:
+ dv, err := getDvFunc(pvcOwner.Name, pvc.Namespace)
+ if err != nil {
+ return false, err
+ }
+ if dv.Status.Phase != Succeeded {
+ return false, nil
+ }
+ }
+ return true, nil
+}
+
+// IsWaitForFirstConsumerBeforePopulating indicates if the persistent volume passed in is in ClaimPending state and waiting for first consumer.
+// It follows this logic:
+// 1. If the PVC is not owned by a DataVolume, return false; we can not assume it will be populated
+// 2. If the PVC is owned by a DataVolume, look up the DV and check the phase; if the phase is WaitForFirstConsumer, return true
+// 3. If the PVC is owned by a DataVolume, look up the DV and check the phase; if the phase is not WaitForFirstConsumer, return false
+func IsWaitForFirstConsumerBeforePopulating(pvc *corev1.PersistentVolumeClaim, getDvFunc func(name, namespace string) (*DataVolume, error)) (bool, error) {
+ pvcOwner := metav1.GetControllerOf(pvc)
+ if pvcOwner != nil && pvcOwner.Kind == "DataVolume" {
+ // Find the data volume:
+ dv, err := getDvFunc(pvcOwner.Name, pvc.Namespace)
+ if err != nil {
+ return false, err
+ }
+ if dv.Status.Phase == WaitForFirstConsumer {
+ return true, nil
+ }
+ }
+ return false, nil
+}
diff --git a/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/zz_generated.deepcopy.go b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..8913ef514
--- /dev/null
+++ b/vendor/kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,1560 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2018 The CDI Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ configv1 "github.com/openshift/api/config/v1"
+ v1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CDI) DeepCopyInto(out *CDI) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDI.
+func (in *CDI) DeepCopy() *CDI {
+ if in == nil {
+ return nil
+ }
+ out := new(CDI)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *CDI) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDICertConfig) DeepCopyInto(out *CDICertConfig) { + *out = *in + if in.CA != nil { + in, out := &in.CA, &out.CA + *out = new(CertConfig) + (*in).DeepCopyInto(*out) + } + if in.Server != nil { + in, out := &in.Server, &out.Server + *out = new(CertConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDICertConfig. +func (in *CDICertConfig) DeepCopy() *CDICertConfig { + if in == nil { + return nil + } + out := new(CDICertConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDIConfig) DeepCopyInto(out *CDIConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDIConfig. +func (in *CDIConfig) DeepCopy() *CDIConfig { + if in == nil { + return nil + } + out := new(CDIConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CDIConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDIConfigList) DeepCopyInto(out *CDIConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CDIConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDIConfigList. +func (in *CDIConfigList) DeepCopy() *CDIConfigList { + if in == nil { + return nil + } + out := new(CDIConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CDIConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CDIConfigSpec) DeepCopyInto(out *CDIConfigSpec) { + *out = *in + if in.UploadProxyURLOverride != nil { + in, out := &in.UploadProxyURLOverride, &out.UploadProxyURLOverride + *out = new(string) + **out = **in + } + if in.ImportProxy != nil { + in, out := &in.ImportProxy, &out.ImportProxy + *out = new(ImportProxy) + (*in).DeepCopyInto(*out) + } + if in.ScratchSpaceStorageClass != nil { + in, out := &in.ScratchSpaceStorageClass, &out.ScratchSpaceStorageClass + *out = new(string) + **out = **in + } + if in.PodResourceRequirements != nil { + in, out := &in.PodResourceRequirements, &out.PodResourceRequirements + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.FilesystemOverhead != nil { + in, out := &in.FilesystemOverhead, &out.FilesystemOverhead + *out = new(FilesystemOverhead) + (*in).DeepCopyInto(*out) + } + if in.Preallocation != nil { + in, out := &in.Preallocation, &out.Preallocation + *out = new(bool) + **out = **in + } + if in.InsecureRegistries != nil { + in, out := &in.InsecureRegistries, &out.InsecureRegistries + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DataVolumeTTLSeconds != nil { + in, out := &in.DataVolumeTTLSeconds, &out.DataVolumeTTLSeconds + *out = new(int32) + **out = **in + } + if in.TLSSecurityProfile != nil { + in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile + *out = new(configv1.TLSSecurityProfile) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDIConfigSpec. +func (in *CDIConfigSpec) DeepCopy() *CDIConfigSpec { + if in == nil { + return nil + } + out := new(CDIConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDIConfigStatus) DeepCopyInto(out *CDIConfigStatus) { + *out = *in + if in.UploadProxyURL != nil { + in, out := &in.UploadProxyURL, &out.UploadProxyURL + *out = new(string) + **out = **in + } + if in.ImportProxy != nil { + in, out := &in.ImportProxy, &out.ImportProxy + *out = new(ImportProxy) + (*in).DeepCopyInto(*out) + } + if in.DefaultPodResourceRequirements != nil { + in, out := &in.DefaultPodResourceRequirements, &out.DefaultPodResourceRequirements + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.FilesystemOverhead != nil { + in, out := &in.FilesystemOverhead, &out.FilesystemOverhead + *out = new(FilesystemOverhead) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDIConfigStatus. +func (in *CDIConfigStatus) DeepCopy() *CDIConfigStatus { + if in == nil { + return nil + } + out := new(CDIConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDIList) DeepCopyInto(out *CDIList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CDI, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDIList. 
+func (in *CDIList) DeepCopy() *CDIList { + if in == nil { + return nil + } + out := new(CDIList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CDIList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDISpec) DeepCopyInto(out *CDISpec) { + *out = *in + if in.UninstallStrategy != nil { + in, out := &in.UninstallStrategy, &out.UninstallStrategy + *out = new(CDIUninstallStrategy) + **out = **in + } + in.Infra.DeepCopyInto(&out.Infra) + in.Workloads.DeepCopyInto(&out.Workloads) + if in.CloneStrategyOverride != nil { + in, out := &in.CloneStrategyOverride, &out.CloneStrategyOverride + *out = new(CDICloneStrategy) + **out = **in + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(CDIConfigSpec) + (*in).DeepCopyInto(*out) + } + if in.CertConfig != nil { + in, out := &in.CertConfig, &out.CertConfig + *out = new(CDICertConfig) + (*in).DeepCopyInto(*out) + } + if in.PriorityClass != nil { + in, out := &in.PriorityClass, &out.PriorityClass + *out = new(CDIPriorityClass) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDISpec. +func (in *CDISpec) DeepCopy() *CDISpec { + if in == nil { + return nil + } + out := new(CDISpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CDIStatus) DeepCopyInto(out *CDIStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CDIStatus. +func (in *CDIStatus) DeepCopy() *CDIStatus { + if in == nil { + return nil + } + out := new(CDIStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertConfig) DeepCopyInto(out *CertConfig) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(metav1.Duration) + **out = **in + } + if in.RenewBefore != nil { + in, out := &in.RenewBefore, &out.RenewBefore + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertConfig. +func (in *CertConfig) DeepCopy() *CertConfig { + if in == nil { + return nil + } + out := new(CertConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClaimPropertySet) DeepCopyInto(out *ClaimPropertySet) { + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]v1.PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.VolumeMode != nil { + in, out := &in.VolumeMode, &out.VolumeMode + *out = new(v1.PersistentVolumeMode) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimPropertySet. 
+func (in *ClaimPropertySet) DeepCopy() *ClaimPropertySet { + if in == nil { + return nil + } + out := new(ClaimPropertySet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionState) DeepCopyInto(out *ConditionState) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionState. +func (in *ConditionState) DeepCopy() *ConditionState { + if in == nil { + return nil + } + out := new(ConditionState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataImportCron) DeepCopyInto(out *DataImportCron) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataImportCron. +func (in *DataImportCron) DeepCopy() *DataImportCron { + if in == nil { + return nil + } + out := new(DataImportCron) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataImportCron) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataImportCronCondition) DeepCopyInto(out *DataImportCronCondition) { + *out = *in + in.ConditionState.DeepCopyInto(&out.ConditionState) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataImportCronCondition. +func (in *DataImportCronCondition) DeepCopy() *DataImportCronCondition { + if in == nil { + return nil + } + out := new(DataImportCronCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataImportCronList) DeepCopyInto(out *DataImportCronList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DataImportCron, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataImportCronList. +func (in *DataImportCronList) DeepCopy() *DataImportCronList { + if in == nil { + return nil + } + out := new(DataImportCronList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataImportCronList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataImportCronSpec) DeepCopyInto(out *DataImportCronSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + if in.GarbageCollect != nil { + in, out := &in.GarbageCollect, &out.GarbageCollect + *out = new(DataImportCronGarbageCollect) + **out = **in + } + if in.ImportsToKeep != nil { + in, out := &in.ImportsToKeep, &out.ImportsToKeep + *out = new(int32) + **out = **in + } + if in.RetentionPolicy != nil { + in, out := &in.RetentionPolicy, &out.RetentionPolicy + *out = new(DataImportCronRetentionPolicy) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataImportCronSpec. +func (in *DataImportCronSpec) DeepCopy() *DataImportCronSpec { + if in == nil { + return nil + } + out := new(DataImportCronSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataImportCronStatus) DeepCopyInto(out *DataImportCronStatus) { + *out = *in + if in.CurrentImports != nil { + in, out := &in.CurrentImports, &out.CurrentImports + *out = make([]ImportStatus, len(*in)) + copy(*out, *in) + } + if in.LastImportedPVC != nil { + in, out := &in.LastImportedPVC, &out.LastImportedPVC + *out = new(DataVolumeSourcePVC) + **out = **in + } + if in.LastExecutionTimestamp != nil { + in, out := &in.LastExecutionTimestamp, &out.LastExecutionTimestamp + *out = (*in).DeepCopy() + } + if in.LastImportTimestamp != nil { + in, out := &in.LastImportTimestamp, &out.LastImportTimestamp + *out = (*in).DeepCopy() + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DataImportCronCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataImportCronStatus. +func (in *DataImportCronStatus) DeepCopy() *DataImportCronStatus { + if in == nil { + return nil + } + out := new(DataImportCronStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSource) DeepCopyInto(out *DataSource) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSource. +func (in *DataSource) DeepCopy() *DataSource { + if in == nil { + return nil + } + out := new(DataSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataSource) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSourceCondition) DeepCopyInto(out *DataSourceCondition) { + *out = *in + in.ConditionState.DeepCopyInto(&out.ConditionState) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceCondition. 
+func (in *DataSourceCondition) DeepCopy() *DataSourceCondition { + if in == nil { + return nil + } + out := new(DataSourceCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSourceList) DeepCopyInto(out *DataSourceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DataSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceList. +func (in *DataSourceList) DeepCopy() *DataSourceList { + if in == nil { + return nil + } + out := new(DataSourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataSourceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSourceSource) DeepCopyInto(out *DataSourceSource) { + *out = *in + if in.PVC != nil { + in, out := &in.PVC, &out.PVC + *out = new(DataVolumeSourcePVC) + **out = **in + } + if in.Snapshot != nil { + in, out := &in.Snapshot, &out.Snapshot + *out = new(DataVolumeSourceSnapshot) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceSource. +func (in *DataSourceSource) DeepCopy() *DataSourceSource { + if in == nil { + return nil + } + out := new(DataSourceSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSourceSpec) DeepCopyInto(out *DataSourceSpec) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceSpec. +func (in *DataSourceSpec) DeepCopy() *DataSourceSpec { + if in == nil { + return nil + } + out := new(DataSourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataSourceStatus) DeepCopyInto(out *DataSourceStatus) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DataSourceCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSourceStatus. +func (in *DataSourceStatus) DeepCopy() *DataSourceStatus { + if in == nil { + return nil + } + out := new(DataSourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataVolume) DeepCopyInto(out *DataVolume) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolume. 
+func (in *DataVolume) DeepCopy() *DataVolume { + if in == nil { + return nil + } + out := new(DataVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataVolume) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataVolumeBlankImage) DeepCopyInto(out *DataVolumeBlankImage) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeBlankImage. +func (in *DataVolumeBlankImage) DeepCopy() *DataVolumeBlankImage { + if in == nil { + return nil + } + out := new(DataVolumeBlankImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataVolumeCheckpoint) DeepCopyInto(out *DataVolumeCheckpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeCheckpoint. +func (in *DataVolumeCheckpoint) DeepCopy() *DataVolumeCheckpoint { + if in == nil { + return nil + } + out := new(DataVolumeCheckpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataVolumeCondition) DeepCopyInto(out *DataVolumeCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeCondition. +func (in *DataVolumeCondition) DeepCopy() *DataVolumeCondition { + if in == nil { + return nil + } + out := new(DataVolumeCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataVolumeList) DeepCopyInto(out *DataVolumeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DataVolume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeList. +func (in *DataVolumeList) DeepCopy() *DataVolumeList { + if in == nil { + return nil + } + out := new(DataVolumeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataVolumeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataVolumeSource) DeepCopyInto(out *DataVolumeSource) { + *out = *in + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = new(DataVolumeSourceHTTP) + (*in).DeepCopyInto(*out) + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(DataVolumeSourceS3) + **out = **in + } + if in.Registry != nil { + in, out := &in.Registry, &out.Registry + *out = new(DataVolumeSourceRegistry) + (*in).DeepCopyInto(*out) + } + if in.PVC != nil { + in, out := &in.PVC, &out.PVC + *out = new(DataVolumeSourcePVC) + **out = **in + } + if in.Upload != nil { + in, out := &in.Upload, &out.Upload + *out = new(DataVolumeSourceUpload) + **out = **in + } + if in.Blank != nil { + in, out := &in.Blank, &out.Blank + *out = new(DataVolumeBlankImage) + **out = **in + } + if in.Imageio != nil { + in, out := &in.Imageio, &out.Imageio + *out = new(DataVolumeSourceImageIO) + **out = **in + } + if in.VDDK != nil { + in, out := &in.VDDK, &out.VDDK + *out = new(DataVolumeSourceVDDK) + **out = **in + } + if in.Snapshot != nil { + in, out := &in.Snapshot, &out.Snapshot + *out = new(DataVolumeSourceSnapshot) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeSource. +func (in *DataVolumeSource) DeepCopy() *DataVolumeSource { + if in == nil { + return nil + } + out := new(DataVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataVolumeSourceHTTP) DeepCopyInto(out *DataVolumeSourceHTTP) { + *out = *in + if in.ExtraHeaders != nil { + in, out := &in.ExtraHeaders, &out.ExtraHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretExtraHeaders != nil { + in, out := &in.SecretExtraHeaders, &out.SecretExtraHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeSourceHTTP. +func (in *DataVolumeSourceHTTP) DeepCopy() *DataVolumeSourceHTTP { + if in == nil { + return nil + } + out := new(DataVolumeSourceHTTP) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataVolumeSourceImageIO) DeepCopyInto(out *DataVolumeSourceImageIO) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeSourceImageIO. +func (in *DataVolumeSourceImageIO) DeepCopy() *DataVolumeSourceImageIO { + if in == nil { + return nil + } + out := new(DataVolumeSourceImageIO) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataVolumeSourcePVC) DeepCopyInto(out *DataVolumeSourcePVC) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeSourcePVC. +func (in *DataVolumeSourcePVC) DeepCopy() *DataVolumeSourcePVC { + if in == nil { + return nil + } + out := new(DataVolumeSourcePVC) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataVolumeSourceRef) DeepCopyInto(out *DataVolumeSourceRef) { + *out = *in + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeSourceRef. +func (in *DataVolumeSourceRef) DeepCopy() *DataVolumeSourceRef { + if in == nil { + return nil + } + out := new(DataVolumeSourceRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataVolumeSourceRegistry) DeepCopyInto(out *DataVolumeSourceRegistry) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.ImageStream != nil { + in, out := &in.ImageStream, &out.ImageStream + *out = new(string) + **out = **in + } + if in.PullMethod != nil { + in, out := &in.PullMethod, &out.PullMethod + *out = new(RegistryPullMethod) + **out = **in + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(string) + **out = **in + } + if in.CertConfigMap != nil { + in, out := &in.CertConfigMap, &out.CertConfigMap + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeSourceRegistry. +func (in *DataVolumeSourceRegistry) DeepCopy() *DataVolumeSourceRegistry { + if in == nil { + return nil + } + out := new(DataVolumeSourceRegistry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataVolumeSourceS3) DeepCopyInto(out *DataVolumeSourceS3) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeSourceS3. +func (in *DataVolumeSourceS3) DeepCopy() *DataVolumeSourceS3 { + if in == nil { + return nil + } + out := new(DataVolumeSourceS3) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataVolumeSourceSnapshot) DeepCopyInto(out *DataVolumeSourceSnapshot) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeSourceSnapshot. +func (in *DataVolumeSourceSnapshot) DeepCopy() *DataVolumeSourceSnapshot { + if in == nil { + return nil + } + out := new(DataVolumeSourceSnapshot) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataVolumeSourceUpload) DeepCopyInto(out *DataVolumeSourceUpload) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeSourceUpload. +func (in *DataVolumeSourceUpload) DeepCopy() *DataVolumeSourceUpload { + if in == nil { + return nil + } + out := new(DataVolumeSourceUpload) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataVolumeSourceVDDK) DeepCopyInto(out *DataVolumeSourceVDDK) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeSourceVDDK. 
+func (in *DataVolumeSourceVDDK) DeepCopy() *DataVolumeSourceVDDK { + if in == nil { + return nil + } + out := new(DataVolumeSourceVDDK) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataVolumeSpec) DeepCopyInto(out *DataVolumeSpec) { + *out = *in + if in.Source != nil { + in, out := &in.Source, &out.Source + *out = new(DataVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.SourceRef != nil { + in, out := &in.SourceRef, &out.SourceRef + *out = new(DataVolumeSourceRef) + (*in).DeepCopyInto(*out) + } + if in.PVC != nil { + in, out := &in.PVC, &out.PVC + *out = new(v1.PersistentVolumeClaimSpec) + (*in).DeepCopyInto(*out) + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(StorageSpec) + (*in).DeepCopyInto(*out) + } + if in.Checkpoints != nil { + in, out := &in.Checkpoints, &out.Checkpoints + *out = make([]DataVolumeCheckpoint, len(*in)) + copy(*out, *in) + } + if in.Preallocation != nil { + in, out := &in.Preallocation, &out.Preallocation + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeSpec. +func (in *DataVolumeSpec) DeepCopy() *DataVolumeSpec { + if in == nil { + return nil + } + out := new(DataVolumeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataVolumeStatus) DeepCopyInto(out *DataVolumeStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DataVolumeCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataVolumeStatus. +func (in *DataVolumeStatus) DeepCopy() *DataVolumeStatus { + if in == nil { + return nil + } + out := new(DataVolumeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FilesystemOverhead) DeepCopyInto(out *FilesystemOverhead) { + *out = *in + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = make(map[string]Percent, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FilesystemOverhead. +func (in *FilesystemOverhead) DeepCopy() *FilesystemOverhead { + if in == nil { + return nil + } + out := new(FilesystemOverhead) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImportProxy) DeepCopyInto(out *ImportProxy) { + *out = *in + if in.HTTPProxy != nil { + in, out := &in.HTTPProxy, &out.HTTPProxy + *out = new(string) + **out = **in + } + if in.HTTPSProxy != nil { + in, out := &in.HTTPSProxy, &out.HTTPSProxy + *out = new(string) + **out = **in + } + if in.NoProxy != nil { + in, out := &in.NoProxy, &out.NoProxy + *out = new(string) + **out = **in + } + if in.TrustedCAProxy != nil { + in, out := &in.TrustedCAProxy, &out.TrustedCAProxy + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImportProxy. 
+func (in *ImportProxy) DeepCopy() *ImportProxy { + if in == nil { + return nil + } + out := new(ImportProxy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImportStatus) DeepCopyInto(out *ImportStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImportStatus. +func (in *ImportStatus) DeepCopy() *ImportStatus { + if in == nil { + return nil + } + out := new(ImportStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectTransfer) DeepCopyInto(out *ObjectTransfer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectTransfer. +func (in *ObjectTransfer) DeepCopy() *ObjectTransfer { + if in == nil { + return nil + } + out := new(ObjectTransfer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ObjectTransfer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectTransferCondition) DeepCopyInto(out *ObjectTransferCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectTransferCondition. +func (in *ObjectTransferCondition) DeepCopy() *ObjectTransferCondition { + if in == nil { + return nil + } + out := new(ObjectTransferCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectTransferList) DeepCopyInto(out *ObjectTransferList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ObjectTransfer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectTransferList. +func (in *ObjectTransferList) DeepCopy() *ObjectTransferList { + if in == nil { + return nil + } + out := new(ObjectTransferList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ObjectTransferList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectTransferSpec) DeepCopyInto(out *ObjectTransferSpec) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + in.Target.DeepCopyInto(&out.Target) + if in.ParentName != nil { + in, out := &in.ParentName, &out.ParentName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectTransferSpec. +func (in *ObjectTransferSpec) DeepCopy() *ObjectTransferSpec { + if in == nil { + return nil + } + out := new(ObjectTransferSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectTransferStatus) DeepCopyInto(out *ObjectTransferStatus) { + *out = *in + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ObjectTransferCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectTransferStatus. +func (in *ObjectTransferStatus) DeepCopy() *ObjectTransferStatus { + if in == nil { + return nil + } + out := new(ObjectTransferStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageProfile) DeepCopyInto(out *StorageProfile) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageProfile. +func (in *StorageProfile) DeepCopy() *StorageProfile { + if in == nil { + return nil + } + out := new(StorageProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageProfile) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageProfileList) DeepCopyInto(out *StorageProfileList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageProfile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageProfileList. +func (in *StorageProfileList) DeepCopy() *StorageProfileList { + if in == nil { + return nil + } + out := new(StorageProfileList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageProfileList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageProfileSpec) DeepCopyInto(out *StorageProfileSpec) { + *out = *in + if in.CloneStrategy != nil { + in, out := &in.CloneStrategy, &out.CloneStrategy + *out = new(CDICloneStrategy) + **out = **in + } + if in.ClaimPropertySets != nil { + in, out := &in.ClaimPropertySets, &out.ClaimPropertySets + *out = make([]ClaimPropertySet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageProfileSpec. +func (in *StorageProfileSpec) DeepCopy() *StorageProfileSpec { + if in == nil { + return nil + } + out := new(StorageProfileSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageProfileStatus) DeepCopyInto(out *StorageProfileStatus) { + *out = *in + if in.StorageClass != nil { + in, out := &in.StorageClass, &out.StorageClass + *out = new(string) + **out = **in + } + if in.Provisioner != nil { + in, out := &in.Provisioner, &out.Provisioner + *out = new(string) + **out = **in + } + if in.CloneStrategy != nil { + in, out := &in.CloneStrategy, &out.CloneStrategy + *out = new(CDICloneStrategy) + **out = **in + } + if in.ClaimPropertySets != nil { + in, out := &in.ClaimPropertySets, &out.ClaimPropertySets + *out = make([]ClaimPropertySet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageProfileStatus. +func (in *StorageProfileStatus) DeepCopy() *StorageProfileStatus { + if in == nil { + return nil + } + out := new(StorageProfileStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageSpec) DeepCopyInto(out *StorageSpec) { + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]v1.PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Resources.DeepCopyInto(&out.Resources) + if in.StorageClassName != nil { + in, out := &in.StorageClassName, &out.StorageClassName + *out = new(string) + **out = **in + } + if in.VolumeMode != nil { + in, out := &in.VolumeMode, &out.VolumeMode + *out = new(v1.PersistentVolumeMode) + **out = **in + } + if in.DataSource != nil { + in, out := &in.DataSource, &out.DataSource + *out = new(v1.TypedLocalObjectReference) + (*in).DeepCopyInto(*out) + } + if in.DataSourceRef != nil { + in, out := &in.DataSourceRef, &out.DataSourceRef + *out = new(v1.TypedLocalObjectReference) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSpec. +func (in *StorageSpec) DeepCopy() *StorageSpec { + if in == nil { + return nil + } + out := new(StorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TransferSource) DeepCopyInto(out *TransferSource) { + *out = *in + if in.RequiredAnnotations != nil { + in, out := &in.RequiredAnnotations, &out.RequiredAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransferSource. +func (in *TransferSource) DeepCopy() *TransferSource { + if in == nil { + return nil + } + out := new(TransferSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TransferTarget) DeepCopyInto(out *TransferTarget) { + *out = *in + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TransferTarget. +func (in *TransferTarget) DeepCopy() *TransferTarget { + if in == nil { + return nil + } + out := new(TransferTarget) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/kubevirt.io/controller-lifecycle-operator-sdk/api/LICENSE b/vendor/kubevirt.io/controller-lifecycle-operator-sdk/api/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/kubevirt.io/controller-lifecycle-operator-sdk/api/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
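The vendored zz_generated.deepcopy.go functions above all follow one mechanical pattern emitted by deepcopy-gen: value fields ride along on the initial shallow copy (*out = *in), pointer fields are re-allocated with new before being dereferenced into, maps and slices are re-made, and slices of structs recurse element-wise through DeepCopyInto. A minimal hand-written sketch of the same pattern follows; the Widget and Part types are hypothetical, used for illustration only, and belong to no vendored API.

package deepcopyexample

// Widget is a hypothetical type (not part of any vendored API above); it
// exists only to illustrate the pattern deepcopy-gen emits per field kind.
type Widget struct {
	Name     string            // plain value: covered by the initial *out = *in
	Replicas *int32            // pointer: must be re-allocated before copying
	Labels   map[string]string // map: must be re-made and copied key by key
	Parts    []Part            // slice of structs: copied element-wise
}

// Part carries its own DeepCopyInto, as generated types do.
type Part struct {
	ID string
}

// DeepCopyInto copies the receiver into out; Part has no reference-typed
// fields, so a shallow copy is already a deep copy.
func (in *Part) DeepCopyInto(out *Part) {
	*out = *in
}

// DeepCopyInto mirrors the generated functions: shallow-copy everything,
// then re-allocate every reference-typed field so in and out share nothing.
func (in *Widget) DeepCopyInto(out *Widget) {
	*out = *in // copies Name, but aliases Replicas, Labels, and Parts
	if in.Replicas != nil {
		in, out := &in.Replicas, &out.Replicas
		*out = new(int32)
		**out = **in
	}
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Parts != nil {
		in, out := &in.Parts, &out.Parts
		*out = make([]Part, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy allocates a fresh Widget and delegates to DeepCopyInto, matching
// the generated DeepCopy wrappers: a nil receiver yields nil.
func (in *Widget) DeepCopy() *Widget {
	if in == nil {
		return nil
	}
	out := new(Widget)
	in.DeepCopyInto(out)
	return out
}

The shadowed in, out := &in.Field, &out.Field pairs look redundant in hand-written Go, but they let the generator emit an identical fix-up block for every field; a slice of plain scalars can use copy instead of the loop, as the generated ClaimPropertySet.DeepCopyInto above does for AccessModes.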
diff --git a/vendor/kubevirt.io/controller-lifecycle-operator-sdk/api/types.go b/vendor/kubevirt.io/controller-lifecycle-operator-sdk/api/types.go
new file mode 100644
index 000000000..b99715c3c
--- /dev/null
+++ b/vendor/kubevirt.io/controller-lifecycle-operator-sdk/api/types.go
@@ -0,0 +1,130 @@
+package api
+
+import (
+	conditions "github.com/openshift/custom-resource-status/conditions/v1"
+	corev1 "k8s.io/api/core/v1"
+)
+
+// Phase is the current phase of the deployment
+type Phase string
+
+const (
+	// PhaseDeploying signals that the resources are being deployed
+	PhaseDeploying Phase = "Deploying"
+
+	// PhaseDeployed signals that the resources are successfully deployed
+	PhaseDeployed Phase = "Deployed"
+
+	// PhaseDeleting signals that the resources are being removed
+	PhaseDeleting Phase = "Deleting"
+
+	// PhaseDeleted signals that the resources are deleted
+	PhaseDeleted Phase = "Deleted"
+
+	// PhaseError signals that the deployment is in an error state
+	PhaseError Phase = "Error"
+
+	// PhaseUpgrading signals that the resources are being upgraded
+	PhaseUpgrading Phase = "Upgrading"
+
+	// PhaseEmpty is an uninitialized phase
+	PhaseEmpty Phase = ""
+)
+
+// Status represents the status of an operator configuration resource; must be inlined in the operator configuration resource status
+type Status struct {
+	Phase Phase `json:"phase,omitempty"`
+	// A list of current conditions of the resource
+	Conditions []conditions.Condition `json:"conditions,omitempty" optional:"true"`
+	// The version of the resource as defined by the operator
+	OperatorVersion string `json:"operatorVersion,omitempty" optional:"true"`
+	// The desired version of the resource
+	TargetVersion string `json:"targetVersion,omitempty" optional:"true"`
+	// The observed version of the resource
+	ObservedVersion string `json:"observedVersion,omitempty" optional:"true"`
+}
+
+// NodePlacement describes node scheduling configuration.
+// +k8s:openapi-gen=true
+type NodePlacement struct {
+	// nodeSelector is the node selector applied to the relevant kind of pods
+	// It specifies a map of key-value pairs: for the pod to be eligible to run on a node,
+	// the node must have each of the indicated key-value pairs as labels
+	// (it can have additional labels as well).
+	// See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+	// +kubebuilder:validation:Optional
+	// +optional
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+	// affinity enables pod affinity/anti-affinity placement expanding the types of constraints
+	// that can be expressed with nodeSelector.
+	// affinity is going to be applied to the relevant kind of pods in parallel with nodeSelector
+	// See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity
+	// +kubebuilder:validation:Optional
+	// +optional
+	Affinity *corev1.Affinity `json:"affinity,omitempty"`
+
+	// tolerations is a list of tolerations applied to the relevant kind of pods
+	// See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ for more info.
+	// These are additional tolerations other than default ones.
+	// +kubebuilder:validation:Optional
+	// +optional
+	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+}
+
+// DeepCopyInto is copying the receiver, writing into out. in must be non-nil.
+func (in *Status) DeepCopyInto(out *Status) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]conditions.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePlacement) DeepCopyInto(out *NodePlacement) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(corev1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePlacement. +func (in *NodePlacement) DeepCopy() *NodePlacement { + if in == nil { + return nil + } + out := new(NodePlacement) + in.DeepCopyInto(out) + return out +} + +// SwaggerDoc provides documentation for NodePlacement +func (NodePlacement) SwaggerDoc() map[string]string { + return map[string]string{ + "": "NodePlacement describes node scheduling configuration.", + "nodeSelector": "nodeSelector is the node selector applied to the relevant kind of pods\nIt specifies a map of key-value pairs: for the pod to be eligible to run on a node,\nthe node must have each of the indicated key-value pairs as labels\n(it can have additional labels as well).\nSee https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector\n+kubebuilder:validation:Optional\n+optional", + "affinity": "affinity enables pod affinity/anti-affinity placement expanding the types of constraints\nthat can be expressed with nodeSelector.\naffinity is going to be applied to the relevant kind of pods in parallel with nodeSelector\nSee https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity\n+kubebuilder:validation:Optional\n+optional", + "tolerations": "tolerations is a list of tolerations applied to the relevant kind of pods\nSee https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ for more info.\nThese are additional tolerations other than default ones.\n+kubebuilder:validation:Optional\n+optional", + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 2e65f6108..ec060ab9c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -35,7 +35,7 @@ github.com/cespare/xxhash/v2 # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/emicklei/go-restful v2.9.5+incompatible +# github.com/emicklei/go-restful v2.15.0+incompatible ## explicit github.com/emicklei/go-restful github.com/emicklei/go-restful/log @@ -48,7 +48,7 @@ github.com/form3tech-oss/jwt-go # github.com/fsnotify/fsnotify v1.5.1 ## explicit; go 1.13 github.com/fsnotify/fsnotify -# github.com/go-logr/logr v1.2.0 +# github.com/go-logr/logr v1.2.3 ## explicit; go 1.16 github.com/go-logr/logr # github.com/go-logr/zapr v1.2.0 @@ -57,10 +57,10 @@ github.com/go-logr/zapr # github.com/go-openapi/jsonpointer v0.19.5 ## explicit; go 1.13 github.com/go-openapi/jsonpointer -# github.com/go-openapi/jsonreference v0.19.5 +# github.com/go-openapi/jsonreference v0.19.6 ## explicit; go 1.13 github.com/go-openapi/jsonreference -# 
github.com/go-openapi/swag v0.19.14
+# github.com/go-openapi/swag v0.21.1
 ## explicit; go 1.11
 github.com/go-openapi/swag
 # github.com/gogo/protobuf v1.3.2
@@ -84,7 +84,7 @@ github.com/google/gnostic/extensions
 github.com/google/gnostic/jsonschema
 github.com/google/gnostic/openapiv2
 github.com/google/gnostic/openapiv3
-# github.com/google/go-cmp v0.5.5
+# github.com/google/go-cmp v0.5.6
 ## explicit; go 1.8
 github.com/google/go-cmp/cmp
 github.com/google/go-cmp/cmp/internal/diff
@@ -131,7 +131,7 @@ github.com/josharian/intern
 # github.com/json-iterator/go v1.1.12
 ## explicit; go 1.12
 github.com/json-iterator/go
-# github.com/mailru/easyjson v0.7.6
+# github.com/mailru/easyjson v0.7.7
 ## explicit; go 1.12
 github.com/mailru/easyjson/buffer
 github.com/mailru/easyjson/jlexer
@@ -172,6 +172,15 @@ github.com/onsi/gomega/matchers/support/goraph/edge
 github.com/onsi/gomega/matchers/support/goraph/node
 github.com/onsi/gomega/matchers/support/goraph/util
 github.com/onsi/gomega/types
+# github.com/openshift/api v0.0.0-20211217221424-8779abfbd571
+## explicit; go 1.16
+github.com/openshift/api/config/v1
+# github.com/openshift/custom-resource-status v1.1.2
+## explicit; go 1.12
+github.com/openshift/custom-resource-status/conditions/v1
+# github.com/pborman/uuid v1.2.0
+## explicit
+github.com/pborman/uuid
 # github.com/pkg/errors v0.9.1
 ## explicit
 github.com/pkg/errors
@@ -604,6 +613,17 @@ k8s.io/utils/net
 k8s.io/utils/pointer
 k8s.io/utils/strings/slices
 k8s.io/utils/trace
+# kubevirt.io/api v0.59.0
+## explicit; go 1.17
+kubevirt.io/api/core
+kubevirt.io/api/core/v1
+# kubevirt.io/containerized-data-importer-api v1.56.0
+## explicit; go 1.18
+kubevirt.io/containerized-data-importer-api/pkg/apis/core
+kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1
+# kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90
+## explicit; go 1.17
+kubevirt.io/controller-lifecycle-operator-sdk/api
 # sigs.k8s.io/controller-runtime v0.12.2
 ## explicit; go 1.17
 sigs.k8s.io/controller-runtime

From 8b59b35be644992635194fd2716ee9e7a32fbfd6 Mon Sep 17 00:00:00 2001
From: testisnullus
Date: Wed, 15 Nov 2023 10:29:43 +0200
Subject: [PATCH 2/4] Add on-premises flow for all cluster resources

---
 apis/clusters/v1beta1/cadence_types.go | 66 +-
 apis/clusters/v1beta1/cadence_webhook.go | 31 +-
 apis/clusters/v1beta1/cassandra_types.go | 80 ++
 apis/clusters/v1beta1/cassandra_webhook.go | 31 +-
 apis/clusters/v1beta1/kafka_types.go | 72 ++
 apis/clusters/v1beta1/kafka_webhook.go | 31 +-
 apis/clusters/v1beta1/kafkaconnect_types.go | 40 +-
 apis/clusters/v1beta1/kafkaconnect_webhook.go | 39 +-
 apis/clusters/v1beta1/postgresql_types.go | 34 +
 apis/clusters/v1beta1/postgresql_webhook.go | 31 +-
 apis/clusters/v1beta1/redis_types.go | 54 +-
 apis/clusters/v1beta1/redis_webhook.go | 30 +-
 apis/clusters/v1beta1/structs.go | 12 +
 apis/clusters/v1beta1/validation.go | 48 +
 .../clusters/v1beta1/zz_generated.deepcopy.go | 50 +
 .../clusters.instaclustr.com_cadences.yaml | 39 +
 .../clusters.instaclustr.com_cassandras.yaml | 39 +
 ...lusters.instaclustr.com_kafkaconnects.yaml | 39 +
 .../clusters.instaclustr.com_kafkas.yaml | 51 ++
 .../clusters.instaclustr.com_postgresqls.yaml | 39 +
 .../bases/clusters.instaclustr.com_redis.yaml | 39 +
 config/rbac/role.yaml | 66 ++
 .../samples/clusters_v1beta1_cassandra.yaml | 31 +-
 .../onpremises/clusters_v1beta1_cadence.yaml | 63 ++
 .../clusters_v1beta1_cassandra.yaml | 37 +
 .../onpremises/clusters_v1beta1_kafka.yaml | 40 +
 .../clusters_v1beta1_kafkaconnect.yaml | 25 +
.../clusters_v1beta1_postgresql.yaml | 52 ++ .../onpremises/clusters_v1beta1_redis.yaml | 32 + .../awsendpointserviceprincipal_controller.go | 2 +- .../clusterbackup_controller.go | 2 +- controllers/clusters/cadence_controller.go | 715 +++++++++------ controllers/clusters/cassandra_controller.go | 614 ++++++++----- controllers/clusters/helpers.go | 27 +- controllers/clusters/kafka_controller.go | 394 +++++--- .../clusters/kafkaconnect_controller.go | 157 +++- controllers/clusters/on_premises.go | 861 ++++++++++++++++++ controllers/clusters/opensearch_controller.go | 2 +- controllers/clusters/postgresql_controller.go | 257 ++++-- controllers/clusters/redis_controller.go | 217 +++-- main.go | 9 +- pkg/models/on_premises.go | 93 ++ pkg/models/operator.go | 2 +- pkg/models/validation.go | 2 + pkg/scheduler/scheduler.go | 9 +- scripts/cloud-init-script-example.sh | 13 + scripts/cloud-init-secret.yaml | 6 + 47 files changed, 3726 insertions(+), 897 deletions(-) create mode 100644 config/samples/onpremises/clusters_v1beta1_cadence.yaml create mode 100644 config/samples/onpremises/clusters_v1beta1_cassandra.yaml create mode 100644 config/samples/onpremises/clusters_v1beta1_kafka.yaml create mode 100644 config/samples/onpremises/clusters_v1beta1_kafkaconnect.yaml create mode 100644 config/samples/onpremises/clusters_v1beta1_postgresql.yaml create mode 100644 config/samples/onpremises/clusters_v1beta1_redis.yaml create mode 100644 controllers/clusters/on_premises.go create mode 100644 pkg/models/on_premises.go create mode 100644 scripts/cloud-init-script-example.sh create mode 100644 scripts/cloud-init-secret.yaml diff --git a/apis/clusters/v1beta1/cadence_types.go b/apis/clusters/v1beta1/cadence_types.go index f3b394f27..ff5edd2b7 100644 --- a/apis/clusters/v1beta1/cadence_types.go +++ b/apis/clusters/v1beta1/cadence_types.go @@ -22,9 +22,11 @@ import ( "fmt" "regexp" + k8scorev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/instaclustr/operator/pkg/models" @@ -62,7 +64,8 @@ type BundledOpenSearchSpec struct { // CadenceSpec defines the desired state of Cadence type CadenceSpec struct { - Cluster `json:",inline"` + Cluster `json:",inline"` + OnPremisesSpec *OnPremisesSpec `json:"onPremisesSpec,omitempty"` //+kubebuilder:validation:MinItems:=1 //+kubebuilder:validation:MaxItems:=1 DataCentres []*CadenceDataCentre `json:"dataCentres"` @@ -793,3 +796,64 @@ func (o *BundledOpenSearchSpec) validate() error { return nil } + +func (c *Cadence) GetExposePorts() []k8scorev1.ServicePort { + var exposePorts []k8scorev1.ServicePort + if !c.Spec.PrivateNetworkCluster { + exposePorts = []k8scorev1.ServicePort{ + { + Name: models.CadenceTChannel, + Port: models.Port7933, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port7933, + }, + }, + { + Name: models.CadenceWeb, + Port: models.Port8088, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port8088, + }, + }, + } + if c.Spec.DataCentres[0].ClientEncryption { + sslPort := k8scorev1.ServicePort{ + Name: models.CadenceGRPC, + Port: models.Port7833, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port7833, + }, + } + exposePorts = append(exposePorts, sslPort) + } + } + return exposePorts +} + +func (c *Cadence) GetHeadlessPorts() []k8scorev1.ServicePort { + headlessPorts := []k8scorev1.ServicePort{ + { + Name: 
models.CadenceTChannel, + Port: models.Port7933, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port7933, + }, + }, + } + if c.Spec.DataCentres[0].ClientEncryption { + sslPort := k8scorev1.ServicePort{ + Name: models.CadenceGRPC, + Port: models.Port7833, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port7833, + }, + } + headlessPorts = append(headlessPorts, sslPort) + } + return headlessPorts +} diff --git a/apis/clusters/v1beta1/cadence_webhook.go b/apis/clusters/v1beta1/cadence_webhook.go index f7fe05572..7684c2eaa 100644 --- a/apis/clusters/v1beta1/cadence_webhook.go +++ b/apis/clusters/v1beta1/cadence_webhook.go @@ -81,6 +81,19 @@ func (cv *cadenceValidator) ValidateCreate(ctx context.Context, obj runtime.Obje return err } + if c.Spec.OnPremisesSpec != nil { + err = c.Spec.OnPremisesSpec.ValidateCreation() + if err != nil { + return err + } + if c.Spec.PrivateNetworkCluster { + err = c.Spec.OnPremisesSpec.ValidateSSHGatewayCreation() + if err != nil { + return err + } + } + } + appVersions, err := cv.API.ListAppVersions(models.CadenceAppKind) if err != nil { return fmt.Errorf("cannot list versions for kind: %v, err: %w", @@ -169,10 +182,22 @@ func (cv *cadenceValidator) ValidateCreate(ctx context.Context, obj runtime.Obje return fmt.Errorf("data centres field is empty") } + //TODO: add support of multiple DCs for OnPrem clusters + if len(c.Spec.DataCentres) > 1 && c.Spec.OnPremisesSpec != nil { + return fmt.Errorf("on-premises cluster can be provisioned with only one data centre") + } + for _, dc := range c.Spec.DataCentres { - err := dc.DataCentre.ValidateCreation() - if err != nil { - return err + if c.Spec.OnPremisesSpec != nil { + err := dc.DataCentre.ValidateOnPremisesCreation() + if err != nil { + return err + } + } else { + err := dc.DataCentre.ValidateCreation() + if err != nil { + return err + } } if !c.Spec.PrivateNetworkCluster && dc.PrivateLink != nil { diff --git a/apis/clusters/v1beta1/cassandra_types.go b/apis/clusters/v1beta1/cassandra_types.go index cd19c9374..ece5da80c 100644 --- a/apis/clusters/v1beta1/cassandra_types.go +++ b/apis/clusters/v1beta1/cassandra_types.go @@ -21,7 +21,9 @@ import ( "fmt" "strconv" + k8scorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -56,6 +58,7 @@ type CassandraRestoreFrom struct { // CassandraSpec defines the desired state of Cassandra type CassandraSpec struct { RestoreFrom *CassandraRestoreFrom `json:"restoreFrom,omitempty"` + OnPremisesSpec *OnPremisesSpec `json:"onPremisesSpec,omitempty"` Cluster `json:",inline"` DataCentres []*CassandraDataCentre `json:"dataCentres,omitempty"` LuceneEnabled bool `json:"luceneEnabled,omitempty"` @@ -522,3 +525,80 @@ func (c *Cassandra) SetClusterID(id string) { func init() { SchemeBuilder.Register(&Cassandra{}, &CassandraList{}) } + +func (c *Cassandra) GetExposePorts() []k8scorev1.ServicePort { + var exposePorts []k8scorev1.ServicePort + if !c.Spec.PrivateNetworkCluster { + exposePorts = []k8scorev1.ServicePort{ + { + Name: models.CassandraInterNode, + Port: models.Port7000, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port7000, + }, + }, + { + Name: models.CassandraCQL, + Port: models.Port9042, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port9042, + }, + }, + { + Name: models.CassandraJMX, + Port: models.Port7199, + TargetPort: intstr.IntOrString{ + 
Type: intstr.Int, + IntVal: models.Port7199, + }, + }, + } + if c.Spec.DataCentres[0].ClientToClusterEncryption { + sslPort := k8scorev1.ServicePort{ + Name: models.CassandraSSL, + Port: models.Port7001, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port7001, + }, + } + exposePorts = append(exposePorts, sslPort) + } + } + return exposePorts +} + +func (c *Cassandra) GetHeadlessPorts() []k8scorev1.ServicePort { + headlessPorts := []k8scorev1.ServicePort{ + { + Name: models.CassandraInterNode, + Port: models.Port7000, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port7000, + }, + }, + { + Name: models.CassandraCQL, + Port: models.Port9042, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port9042, + }, + }, + } + if c.Spec.DataCentres[0].ClientToClusterEncryption { + sslPort := k8scorev1.ServicePort{ + Name: models.CassandraSSL, + Port: models.Port7001, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port7001, + }, + } + headlessPorts = append(headlessPorts, sslPort) + } + return headlessPorts +} diff --git a/apis/clusters/v1beta1/cassandra_webhook.go b/apis/clusters/v1beta1/cassandra_webhook.go index 4d4a6fef6..db2998f2f 100644 --- a/apis/clusters/v1beta1/cassandra_webhook.go +++ b/apis/clusters/v1beta1/cassandra_webhook.go @@ -91,6 +91,19 @@ func (cv *cassandraValidator) ValidateCreate(ctx context.Context, obj runtime.Ob return fmt.Errorf("spark should not have more than 1 item") } + if c.Spec.OnPremisesSpec != nil { + err = c.Spec.OnPremisesSpec.ValidateCreation() + if err != nil { + return err + } + if c.Spec.PrivateNetworkCluster { + err = c.Spec.OnPremisesSpec.ValidateSSHGatewayCreation() + if err != nil { + return err + } + } + } + appVersions, err := cv.API.ListAppVersions(models.CassandraAppKind) if err != nil { return fmt.Errorf("cannot list versions for kind: %v, err: %w", @@ -113,10 +126,22 @@ func (cv *cassandraValidator) ValidateCreate(ctx context.Context, obj runtime.Ob return fmt.Errorf("data centres field is empty") } + //TODO: add support of multiple DCs for OnPrem clusters + if len(c.Spec.DataCentres) > 1 && c.Spec.OnPremisesSpec != nil { + return fmt.Errorf("on-premises cluster can be provisioned with only one data centre") + } + for _, dc := range c.Spec.DataCentres { - err := dc.DataCentre.ValidateCreation() - if err != nil { - return err + if c.Spec.OnPremisesSpec != nil { + err := dc.DataCentre.ValidateOnPremisesCreation() + if err != nil { + return err + } + } else { + err := dc.DataCentre.ValidateCreation() + if err != nil { + return err + } } if !c.Spec.PrivateNetworkCluster && dc.PrivateIPBroadcastForDiscovery { diff --git a/apis/clusters/v1beta1/kafka_types.go b/apis/clusters/v1beta1/kafka_types.go index d4c51261c..2af6c17e1 100644 --- a/apis/clusters/v1beta1/kafka_types.go +++ b/apis/clusters/v1beta1/kafka_types.go @@ -19,7 +19,9 @@ package v1beta1 import ( "encoding/json" + k8scorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/instaclustr/operator/pkg/models" @@ -63,6 +65,7 @@ type KarapaceSchemaRegistry struct { // KafkaSpec defines the desired state of Kafka type KafkaSpec struct { Cluster `json:",inline"` + OnPremisesSpec *OnPremisesSpec `json:"onPremisesSpec,omitempty"` SchemaRegistry []*SchemaRegistry `json:"schemaRegistry,omitempty"` // ReplicationFactor to use for new topic. 
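Editorial aside, not part of the patch: every cluster kind touched in this change gains the same GetExposePorts/GetHeadlessPorts pair (Cassandra above, Kafka just below), and the on-premises bootstrap consumes those slices when it creates Kubernetes Services for the cluster nodes. A minimal sketch of that consumption, using Cassandra; the service name suffix and the label selector are illustrative assumptions, not taken from this patch:

package example

import (
	k8scorev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/instaclustr/operator/apis/clusters/v1beta1"
)

// newHeadlessService is a hypothetical helper showing how the port slice
// returned by GetHeadlessPorts() plugs into a headless Service spec.
func newHeadlessService(c *v1beta1.Cassandra) *k8scorev1.Service {
	return &k8scorev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      c.Name + "-headless", // assumed naming convention
			Namespace: c.Namespace,
		},
		Spec: k8scorev1.ServiceSpec{
			ClusterIP: k8scorev1.ClusterIPNone, // headless: per-pod DNS records, no virtual IP
			Ports:     c.GetHeadlessPorts(),    // inter-node, CQL, and (if enabled) SSL ports
			Selector: map[string]string{
				"instaclustr.com/clusterID": c.Status.ID, // assumed label
			},
		},
	}
}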
@@ -484,3 +487,72 @@ func (k *Kafka) GetClusterID() string { func (k *Kafka) SetClusterID(id string) { k.Status.ID = id } + +func (k *Kafka) GetExposePorts() []k8scorev1.ServicePort { + var exposePorts []k8scorev1.ServicePort + if !k.Spec.PrivateNetworkCluster { + exposePorts = []k8scorev1.ServicePort{ + { + Name: models.KafkaClient, + Port: models.Port9092, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port9092, + }, + }, + { + Name: models.KafkaControlPlane, + Port: models.Port9093, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port9093, + }, + }, + } + if k.Spec.ClientToClusterEncryption { + sslPort := k8scorev1.ServicePort{ + Name: models.KafkaBroker, + Port: models.Port9094, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port9094, + }, + } + exposePorts = append(exposePorts, sslPort) + } + } + return exposePorts +} + +func (k *Kafka) GetHeadlessPorts() []k8scorev1.ServicePort { + headlessPorts := []k8scorev1.ServicePort{ + { + Name: models.KafkaClient, + Port: models.Port9092, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port9092, + }, + }, + { + Name: models.KafkaControlPlane, + Port: models.Port9093, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port9093, + }, + }, + } + if k.Spec.ClientToClusterEncryption { + kafkaBrokerPort := k8scorev1.ServicePort{ + Name: models.KafkaBroker, + Port: models.Port9094, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port9094, + }, + } + headlessPorts = append(headlessPorts, kafkaBrokerPort) + } + return headlessPorts +} diff --git a/apis/clusters/v1beta1/kafka_webhook.go b/apis/clusters/v1beta1/kafka_webhook.go index 47971e2ab..7c1d65c4e 100644 --- a/apis/clusters/v1beta1/kafka_webhook.go +++ b/apis/clusters/v1beta1/kafka_webhook.go @@ -81,6 +81,19 @@ func (kv *kafkaValidator) ValidateCreate(ctx context.Context, obj runtime.Object return err } + if k.Spec.OnPremisesSpec != nil { + err = k.Spec.OnPremisesSpec.ValidateCreation() + if err != nil { + return err + } + if k.Spec.PrivateNetworkCluster { + err = k.Spec.OnPremisesSpec.ValidateSSHGatewayCreation() + if err != nil { + return err + } + } + } + appVersions, err := kv.API.ListAppVersions(models.KafkaAppKind) if err != nil { return fmt.Errorf("cannot list versions for kind: %v, err: %w", @@ -96,10 +109,22 @@ func (kv *kafkaValidator) ValidateCreate(ctx context.Context, obj runtime.Object return models.ErrZeroDataCentres } + //TODO: add support of multiple DCs for OnPrem clusters + if len(k.Spec.DataCentres) > 1 && k.Spec.OnPremisesSpec != nil { + return fmt.Errorf("on-premises cluster can be provisioned with only one data centre") + } + for _, dc := range k.Spec.DataCentres { - err := dc.DataCentre.ValidateCreation() - if err != nil { - return err + if k.Spec.OnPremisesSpec != nil { + err := dc.DataCentre.ValidateOnPremisesCreation() + if err != nil { + return err + } + } else { + err := dc.DataCentre.ValidateCreation() + if err != nil { + return err + } } if len(dc.PrivateLink) > 1 { diff --git a/apis/clusters/v1beta1/kafkaconnect_types.go b/apis/clusters/v1beta1/kafkaconnect_types.go index a101ee454..9c2e99f22 100644 --- a/apis/clusters/v1beta1/kafkaconnect_types.go +++ b/apis/clusters/v1beta1/kafkaconnect_types.go @@ -20,8 +20,10 @@ import ( "encoding/json" "fmt" + k8scorev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" 
"github.com/instaclustr/operator/pkg/models" @@ -105,9 +107,10 @@ type KafkaConnectDataCentre struct { // KafkaConnectSpec defines the desired state of KafkaConnect type KafkaConnectSpec struct { - Cluster `json:",inline"` - DataCentres []*KafkaConnectDataCentre `json:"dataCentres"` - TargetCluster []*TargetCluster `json:"targetCluster"` + Cluster `json:",inline"` + OnPremisesSpec *OnPremisesSpec `json:"onPremisesSpec,omitempty"` + DataCentres []*KafkaConnectDataCentre `json:"dataCentres"` + TargetCluster []*TargetCluster `json:"targetCluster"` // CustomConnectors defines the location for custom connector storage and access info. CustomConnectors []*CustomConnectors `json:"customConnectors,omitempty"` @@ -720,3 +723,34 @@ func (k *KafkaConnect) NewDefaultUserSecret(username, password string) *v1.Secre }, } } + +func (k *KafkaConnect) GetExposePorts() []k8scorev1.ServicePort { + var exposePorts []k8scorev1.ServicePort + if !k.Spec.PrivateNetworkCluster { + exposePorts = []k8scorev1.ServicePort{ + { + Name: models.KafkaConnectAPI, + Port: models.Port8083, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port8083, + }, + }, + } + } + return exposePorts +} + +func (k *KafkaConnect) GetHeadlessPorts() []k8scorev1.ServicePort { + headlessPorts := []k8scorev1.ServicePort{ + { + Name: models.KafkaConnectAPI, + Port: models.Port8083, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port8083, + }, + }, + } + return headlessPorts +} diff --git a/apis/clusters/v1beta1/kafkaconnect_webhook.go b/apis/clusters/v1beta1/kafkaconnect_webhook.go index c239c54e9..eae339c07 100644 --- a/apis/clusters/v1beta1/kafkaconnect_webhook.go +++ b/apis/clusters/v1beta1/kafkaconnect_webhook.go @@ -82,6 +82,19 @@ func (kcv *kafkaConnectValidator) ValidateCreate(ctx context.Context, obj runtim return err } + if kc.Spec.OnPremisesSpec != nil { + err = kc.Spec.OnPremisesSpec.ValidateCreation() + if err != nil { + return err + } + if kc.Spec.PrivateNetworkCluster { + err = kc.Spec.OnPremisesSpec.ValidateSSHGatewayCreation() + if err != nil { + return err + } + } + } + appVersions, err := kcv.API.ListAppVersions(models.KafkaConnectAppKind) if err != nil { return fmt.Errorf("cannot list versions for kind: %v, err: %w", @@ -93,10 +106,6 @@ func (kcv *kafkaConnectValidator) ValidateCreate(ctx context.Context, obj runtim return err } - if len(kc.Spec.DataCentres) == 0 { - return fmt.Errorf("data centres field is empty") - } - if len(kc.Spec.TargetCluster) > 1 { return fmt.Errorf("targetCluster array size must be between 0 and 1") } @@ -126,10 +135,26 @@ func (kcv *kafkaConnectValidator) ValidateCreate(ctx context.Context, obj runtim return fmt.Errorf("customConnectors array size must be between 0 and 1") } + if len(kc.Spec.DataCentres) == 0 { + return fmt.Errorf("data centres field is empty") + } + + //TODO: add support of multiple DCs for OnPrem clusters + if len(kc.Spec.DataCentres) > 1 && kc.Spec.OnPremisesSpec != nil { + return fmt.Errorf("on-premises cluster can be provisioned with only one data centre") + } + for _, dc := range kc.Spec.DataCentres { - err := dc.ValidateCreation() - if err != nil { - return err + if kc.Spec.OnPremisesSpec != nil { + err := dc.DataCentre.ValidateOnPremisesCreation() + if err != nil { + return err + } + } else { + err := dc.DataCentre.ValidateCreation() + if err != nil { + return err + } } err = validateReplicationFactor(models.KafkaConnectReplicationFactors, dc.ReplicationFactor) diff --git a/apis/clusters/v1beta1/postgresql_types.go 
b/apis/clusters/v1beta1/postgresql_types.go index bde017933..4aae8eb93 100644 --- a/apis/clusters/v1beta1/postgresql_types.go +++ b/apis/clusters/v1beta1/postgresql_types.go @@ -24,9 +24,11 @@ import ( "unicode" k8sCore "k8s.io/api/core/v1" + k8scorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -75,6 +77,7 @@ type PgRestoreFrom struct { type PgSpec struct { PgRestoreFrom *PgRestoreFrom `json:"pgRestoreFrom,omitempty"` Cluster `json:",inline"` + OnPremisesSpec *OnPremisesSpec `json:"onPremisesSpec,omitempty"` DataCentres []*PgDataCentre `json:"dataCentres,omitempty"` ClusterConfigurations map[string]string `json:"clusterConfigurations,omitempty"` SynchronousModeStrict bool `json:"synchronousModeStrict,omitempty"` @@ -645,3 +648,34 @@ func GetDefaultPgUserSecret( return userSecret, nil } + +func (pg *PostgreSQL) GetExposePorts() []k8scorev1.ServicePort { + var exposePorts []k8scorev1.ServicePort + if !pg.Spec.PrivateNetworkCluster { + exposePorts = []k8scorev1.ServicePort{ + { + Name: models.PostgreSQLDB, + Port: models.Port5432, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port5432, + }, + }, + } + } + return exposePorts +} + +func (pg *PostgreSQL) GetHeadlessPorts() []k8scorev1.ServicePort { + headlessPorts := []k8scorev1.ServicePort{ + { + Name: models.PostgreSQLDB, + Port: models.Port5432, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port5432, + }, + }, + } + return headlessPorts +} diff --git a/apis/clusters/v1beta1/postgresql_webhook.go b/apis/clusters/v1beta1/postgresql_webhook.go index 51f567063..8dd560073 100644 --- a/apis/clusters/v1beta1/postgresql_webhook.go +++ b/apis/clusters/v1beta1/postgresql_webhook.go @@ -91,6 +91,19 @@ func (pgv *pgValidator) ValidateCreate(ctx context.Context, obj runtime.Object) return err } + if pg.Spec.OnPremisesSpec != nil { + err = pg.Spec.OnPremisesSpec.ValidateCreation() + if err != nil { + return err + } + if pg.Spec.PrivateNetworkCluster { + err = pg.Spec.OnPremisesSpec.ValidateSSHGatewayCreation() + if err != nil { + return err + } + } + } + if pg.Spec.UserRefs != nil { err = pgv.validatePostgreSQLUsers(ctx, pg) if err != nil { @@ -113,10 +126,22 @@ func (pgv *pgValidator) ValidateCreate(ctx context.Context, obj runtime.Object) return models.ErrZeroDataCentres } + //TODO: add support of multiple DCs for OnPrem clusters + if len(pg.Spec.DataCentres) > 1 && pg.Spec.OnPremisesSpec != nil { + return fmt.Errorf("on-premises cluster can be provisioned with only one data centre") + } + for _, dc := range pg.Spec.DataCentres { - err = dc.DataCentre.ValidateCreation() - if err != nil { - return err + if pg.Spec.OnPremisesSpec != nil { + err := dc.DataCentre.ValidateOnPremisesCreation() + if err != nil { + return err + } + } else { + err := dc.DataCentre.ValidateCreation() + if err != nil { + return err + } } err = dc.ValidatePGBouncer() diff --git a/apis/clusters/v1beta1/redis_types.go b/apis/clusters/v1beta1/redis_types.go index 02c1df4b4..34c5d9624 100644 --- a/apis/clusters/v1beta1/redis_types.go +++ b/apis/clusters/v1beta1/redis_types.go @@ -21,7 +21,9 @@ import ( "fmt" "strconv" + k8scorev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -60,8 
+62,9 @@ type RedisRestoreFrom struct { // RedisSpec defines the desired state of Redis type RedisSpec struct { - RestoreFrom *RedisRestoreFrom `json:"restoreFrom,omitempty"` - Cluster `json:",inline"` + RestoreFrom *RedisRestoreFrom `json:"restoreFrom,omitempty"` + Cluster `json:",inline"` + OnPremisesSpec *OnPremisesSpec `json:"onPremisesSpec,omitempty"` // Enables client to node encryption ClientEncryption bool `json:"clientEncryption,omitempty"` @@ -492,3 +495,50 @@ func (r *Redis) SetClusterID(id string) { func init() { SchemeBuilder.Register(&Redis{}, &RedisList{}) } + +func (r *Redis) GetExposePorts() []k8scorev1.ServicePort { + var exposePorts []k8scorev1.ServicePort + if !r.Spec.PrivateNetworkCluster { + exposePorts = []k8scorev1.ServicePort{ + { + Name: models.RedisDB, + Port: models.Port6379, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port6379, + }, + }, + { + Name: models.RedisBus, + Port: models.Port16379, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port16379, + }, + }, + } + } + return exposePorts +} + +func (r *Redis) GetHeadlessPorts() []k8scorev1.ServicePort { + headlessPorts := []k8scorev1.ServicePort{ + { + Name: models.RedisDB, + Port: models.Port6379, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port6379, + }, + }, + { + Name: models.RedisBus, + Port: models.Port16379, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port16379, + }, + }, + } + return headlessPorts +} diff --git a/apis/clusters/v1beta1/redis_webhook.go b/apis/clusters/v1beta1/redis_webhook.go index 09c48b48e..eed414de2 100644 --- a/apis/clusters/v1beta1/redis_webhook.go +++ b/apis/clusters/v1beta1/redis_webhook.go @@ -92,6 +92,19 @@ func (rv *redisValidator) ValidateCreate(ctx context.Context, obj runtime.Object return err } + if r.Spec.OnPremisesSpec != nil { + err = r.Spec.OnPremisesSpec.ValidateCreation() + if err != nil { + return err + } + if r.Spec.PrivateNetworkCluster { + err = r.Spec.OnPremisesSpec.ValidateSSHGatewayCreation() + if err != nil { + return err + } + } + } + err = r.Spec.ValidatePrivateLink() if err != nil { return err @@ -112,10 +125,21 @@ func (rv *redisValidator) ValidateCreate(ctx context.Context, obj runtime.Object return fmt.Errorf("data centres field is empty") } + if len(r.Spec.DataCentres) > 1 && r.Spec.OnPremisesSpec != nil { + return fmt.Errorf("on-premises cluster can be provisioned with only one data centre") + } + for _, dc := range r.Spec.DataCentres { - err = dc.ValidateCreate() - if err != nil { - return err + if r.Spec.OnPremisesSpec != nil { + err := dc.DataCentre.ValidateOnPremisesCreation() + if err != nil { + return err + } + } else { + err := dc.DataCentre.ValidateCreation() + if err != nil { + return err + } } if !r.Spec.PrivateNetworkCluster && dc.PrivateLink != nil { diff --git a/apis/clusters/v1beta1/structs.go b/apis/clusters/v1beta1/structs.go index f8444df55..dbc230a38 100644 --- a/apis/clusters/v1beta1/structs.go +++ b/apis/clusters/v1beta1/structs.go @@ -121,6 +121,18 @@ type ClusteredMaintenanceEvent struct { Upcoming []*clusterresource.MaintenanceEventStatus `json:"upcoming"` } +type OnPremisesSpec struct { + StorageClassName string `json:"storageClassName"` + OSDiskSize string `json:"osDiskSize"` + DataDiskSize string `json:"dataDiskSize"` + SSHGatewayCPU int64 `json:"sshGatewayCPU,omitempty"` + SSHGatewayMemory string `json:"sshGatewayMemory,omitempty"` + NodeCPU int64 `json:"nodeCPU"` + NodeMemory string `json:"nodeMemory"` + OSImageURL string 
`json:"osImageURL"` + CloudInitScriptRef *Reference `json:"cloudInitScriptRef"` +} + type TwoFactorDelete struct { // Email address which will be contacted when the cluster is requested to be deleted. Email string `json:"email"` diff --git a/apis/clusters/v1beta1/validation.go b/apis/clusters/v1beta1/validation.go index 6eb8e742f..e172fe321 100644 --- a/apis/clusters/v1beta1/validation.go +++ b/apis/clusters/v1beta1/validation.go @@ -94,6 +94,40 @@ func (dc *DataCentre) ValidateCreation() error { return nil } +func (ops *OnPremisesSpec) ValidateCreation() error { + osDiskSizeMatched, err := regexp.Match(models.StorageRegExp, []byte(ops.OSDiskSize)) + if !osDiskSizeMatched || err != nil { + return fmt.Errorf("disk size field for node OS must fit pattern: %s", + models.StorageRegExp) + } + + dataDiskSizeMatched, err := regexp.Match(models.StorageRegExp, []byte(ops.DataDiskSize)) + if !dataDiskSizeMatched || err != nil { + return fmt.Errorf("disk size field for storring cluster data must fit pattern: %s", + models.StorageRegExp) + } + + nodeMemoryMatched, err := regexp.Match(models.MemoryRegExp, []byte(ops.DataDiskSize)) + if !nodeMemoryMatched || err != nil { + return fmt.Errorf("node memory field must fit pattern: %s", + models.MemoryRegExp) + } + + return nil +} + +func (ops *OnPremisesSpec) ValidateSSHGatewayCreation() error { + if ops.SSHGatewayCPU == 0 || ops.SSHGatewayMemory == "" { + return fmt.Errorf("fields SSHGatewayCPU and SSHGatewayMemory must not be empty") + } + sshGatewayMemoryMatched, err := regexp.Match(models.MemoryRegExp, []byte(ops.DataDiskSize)) + if !sshGatewayMemoryMatched || err != nil { + return fmt.Errorf("ssh gateway memory field must fit pattern: %s", + models.MemoryRegExp) + } + return nil +} + func (dc *DataCentre) validateImmutableCloudProviderSettingsUpdate(oldSettings []*CloudProviderSettings) error { if len(oldSettings) != len(dc.CloudProviderSettings) { return models.ErrImmutableCloudProviderSettings @@ -217,3 +251,17 @@ func validateSingleConcurrentResize(concurrentResizes int) error { return nil } + +func (dc *DataCentre) ValidateOnPremisesCreation() error { + if dc.CloudProvider != models.ONPREMISES { + return fmt.Errorf("cloud provider %s is unavailable for data centre: %s, available value: %s", + dc.CloudProvider, dc.Name, models.ONPREMISES) + } + + if dc.Region != models.CLIENTDC { + return fmt.Errorf("region %s is unavailable for data centre: %s, available value: %s", + dc.Region, dc.Name, models.CLIENTDC) + } + + return nil +} diff --git a/apis/clusters/v1beta1/zz_generated.deepcopy.go b/apis/clusters/v1beta1/zz_generated.deepcopy.go index a0cc86ca2..f53b2bd4c 100644 --- a/apis/clusters/v1beta1/zz_generated.deepcopy.go +++ b/apis/clusters/v1beta1/zz_generated.deepcopy.go @@ -232,6 +232,11 @@ func (in *CadenceList) DeepCopyObject() runtime.Object { func (in *CadenceSpec) DeepCopyInto(out *CadenceSpec) { *out = *in in.Cluster.DeepCopyInto(&out.Cluster) + if in.OnPremisesSpec != nil { + in, out := &in.OnPremisesSpec, &out.OnPremisesSpec + *out = new(OnPremisesSpec) + (*in).DeepCopyInto(*out) + } if in.DataCentres != nil { in, out := &in.DataCentres, &out.DataCentres *out = make([]*CadenceDataCentre, len(*in)) @@ -457,6 +462,11 @@ func (in *CassandraSpec) DeepCopyInto(out *CassandraSpec) { *out = new(CassandraRestoreFrom) (*in).DeepCopyInto(*out) } + if in.OnPremisesSpec != nil { + in, out := &in.OnPremisesSpec, &out.OnPremisesSpec + *out = new(OnPremisesSpec) + (*in).DeepCopyInto(*out) + } in.Cluster.DeepCopyInto(&out.Cluster) if in.DataCentres != nil 
{ in, out := &in.DataCentres, &out.DataCentres @@ -997,6 +1007,11 @@ func (in *KafkaConnectList) DeepCopyObject() runtime.Object { func (in *KafkaConnectSpec) DeepCopyInto(out *KafkaConnectSpec) { *out = *in in.Cluster.DeepCopyInto(&out.Cluster) + if in.OnPremisesSpec != nil { + in, out := &in.OnPremisesSpec, &out.OnPremisesSpec + *out = new(OnPremisesSpec) + (*in).DeepCopyInto(*out) + } if in.DataCentres != nil { in, out := &in.DataCentres, &out.DataCentres *out = make([]*KafkaConnectDataCentre, len(*in)) @@ -1121,6 +1136,11 @@ func (in *KafkaList) DeepCopyObject() runtime.Object { func (in *KafkaSpec) DeepCopyInto(out *KafkaSpec) { *out = *in in.Cluster.DeepCopyInto(&out.Cluster) + if in.OnPremisesSpec != nil { + in, out := &in.OnPremisesSpec, &out.OnPremisesSpec + *out = new(OnPremisesSpec) + (*in).DeepCopyInto(*out) + } if in.SchemaRegistry != nil { in, out := &in.SchemaRegistry, &out.SchemaRegistry *out = make([]*SchemaRegistry, len(*in)) @@ -1339,6 +1359,26 @@ func (in *Node) DeepCopy() *Node { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OnPremisesSpec) DeepCopyInto(out *OnPremisesSpec) { + *out = *in + if in.CloudInitScriptRef != nil { + in, out := &in.CloudInitScriptRef, &out.CloudInitScriptRef + *out = new(Reference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnPremisesSpec. +func (in *OnPremisesSpec) DeepCopy() *OnPremisesSpec { + if in == nil { + return nil + } + out := new(OnPremisesSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OpenSearch) DeepCopyInto(out *OpenSearch) { *out = *in @@ -1791,6 +1831,11 @@ func (in *PgSpec) DeepCopyInto(out *PgSpec) { (*in).DeepCopyInto(*out) } in.Cluster.DeepCopyInto(&out.Cluster) + if in.OnPremisesSpec != nil { + in, out := &in.OnPremisesSpec, &out.OnPremisesSpec + *out = new(OnPremisesSpec) + (*in).DeepCopyInto(*out) + } if in.DataCentres != nil { in, out := &in.DataCentres, &out.DataCentres *out = make([]*PgDataCentre, len(*in)) @@ -2099,6 +2144,11 @@ func (in *RedisSpec) DeepCopyInto(out *RedisSpec) { (*in).DeepCopyInto(*out) } in.Cluster.DeepCopyInto(&out.Cluster) + if in.OnPremisesSpec != nil { + in, out := &in.OnPremisesSpec, &out.OnPremisesSpec + *out = new(OnPremisesSpec) + (*in).DeepCopyInto(*out) + } if in.DataCentres != nil { in, out := &in.DataCentres, &out.DataCentres *out = make([]*RedisDataCentre, len(*in)) diff --git a/config/crd/bases/clusters.instaclustr.com_cadences.yaml b/config/crd/bases/clusters.instaclustr.com_cadences.yaml index 1fd0ba456..d83957145 100644 --- a/config/crd/bases/clusters.instaclustr.com_cadences.yaml +++ b/config/crd/bases/clusters.instaclustr.com_cadences.yaml @@ -125,6 +125,45 @@ spec: name: description: Name [ 3 .. 32 ] characters. 
type: string + onPremisesSpec: + properties: + cloudInitScriptRef: + properties: + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + dataDiskSize: + type: string + nodeCPU: + format: int64 + type: integer + nodeMemory: + type: string + osDiskSize: + type: string + osImageURL: + type: string + sshGatewayCPU: + format: int64 + type: integer + sshGatewayMemory: + type: string + storageClassName: + type: string + required: + - cloudInitScriptRef + - dataDiskSize + - nodeCPU + - nodeMemory + - osDiskSize + - osImageURL + - storageClassName + type: object packagedProvisioning: items: properties: diff --git a/config/crd/bases/clusters.instaclustr.com_cassandras.yaml b/config/crd/bases/clusters.instaclustr.com_cassandras.yaml index b92d77a37..63270db0a 100644 --- a/config/crd/bases/clusters.instaclustr.com_cassandras.yaml +++ b/config/crd/bases/clusters.instaclustr.com_cassandras.yaml @@ -109,6 +109,45 @@ spec: name: description: Name [ 3 .. 32 ] characters. type: string + onPremisesSpec: + properties: + cloudInitScriptRef: + properties: + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + dataDiskSize: + type: string + nodeCPU: + format: int64 + type: integer + nodeMemory: + type: string + osDiskSize: + type: string + osImageURL: + type: string + sshGatewayCPU: + format: int64 + type: integer + sshGatewayMemory: + type: string + storageClassName: + type: string + required: + - cloudInitScriptRef + - dataDiskSize + - nodeCPU + - nodeMemory + - osDiskSize + - osImageURL + - storageClassName + type: object passwordAndUserAuth: type: boolean pciCompliance: diff --git a/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml b/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml index 87209b6b0..8269e6d47 100644 --- a/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml +++ b/config/crd/bases/clusters.instaclustr.com_kafkaconnects.yaml @@ -179,6 +179,45 @@ spec: name: description: Name [ 3 .. 32 ] characters. type: string + onPremisesSpec: + properties: + cloudInitScriptRef: + properties: + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + dataDiskSize: + type: string + nodeCPU: + format: int64 + type: integer + nodeMemory: + type: string + osDiskSize: + type: string + osImageURL: + type: string + sshGatewayCPU: + format: int64 + type: integer + sshGatewayMemory: + type: string + storageClassName: + type: string + required: + - cloudInitScriptRef + - dataDiskSize + - nodeCPU + - nodeMemory + - osDiskSize + - osImageURL + - storageClassName + type: object pciCompliance: description: The PCI compliance standards relate to the security of user data and transactional information. Can only be applied clusters diff --git a/config/crd/bases/clusters.instaclustr.com_kafkas.yaml b/config/crd/bases/clusters.instaclustr.com_kafkas.yaml index a6e3de764..4139e9677 100644 --- a/config/crd/bases/clusters.instaclustr.com_kafkas.yaml +++ b/config/crd/bases/clusters.instaclustr.com_kafkas.yaml @@ -165,6 +165,45 @@ spec: name: description: Name [ 3 .. 32 ] characters. 
type: string + onPremisesSpec: + properties: + cloudInitScriptRef: + properties: + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + dataDiskSize: + type: string + nodeCPU: + format: int64 + type: integer + nodeMemory: + type: string + osDiskSize: + type: string + osImageURL: + type: string + sshGatewayCPU: + format: int64 + type: integer + sshGatewayMemory: + type: string + storageClassName: + type: string + required: + - cloudInitScriptRef + - dataDiskSize + - nodeCPU + - nodeMemory + - osDiskSize + - osImageURL + - storageClassName + type: object partitionsNumber: description: PartitionsNumber number of partitions to use when created new topics. @@ -264,6 +303,18 @@ spec: status: description: KafkaStatus defines the observed state of Kafka properties: + availableUsers: + items: + properties: + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + type: array cdcid: type: string currentClusterOperationStatus: diff --git a/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml b/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml index 5ec93a122..6bc581752 100644 --- a/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml +++ b/config/crd/bases/clusters.instaclustr.com_postgresqls.yaml @@ -132,6 +132,45 @@ spec: name: description: Name [ 3 .. 32 ] characters. type: string + onPremisesSpec: + properties: + cloudInitScriptRef: + properties: + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + dataDiskSize: + type: string + nodeCPU: + format: int64 + type: integer + nodeMemory: + type: string + osDiskSize: + type: string + osImageURL: + type: string + sshGatewayCPU: + format: int64 + type: integer + sshGatewayMemory: + type: string + storageClassName: + type: string + required: + - cloudInitScriptRef + - dataDiskSize + - nodeCPU + - nodeMemory + - osDiskSize + - osImageURL + - storageClassName + type: object pciCompliance: description: The PCI compliance standards relate to the security of user data and transactional information. Can only be applied clusters diff --git a/config/crd/bases/clusters.instaclustr.com_redis.yaml b/config/crd/bases/clusters.instaclustr.com_redis.yaml index df4104371..d297d0347 100644 --- a/config/crd/bases/clusters.instaclustr.com_redis.yaml +++ b/config/crd/bases/clusters.instaclustr.com_redis.yaml @@ -115,6 +115,45 @@ spec: name: description: Name [ 3 .. 32 ] characters. 
type: string + onPremisesSpec: + properties: + cloudInitScriptRef: + properties: + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + dataDiskSize: + type: string + nodeCPU: + format: int64 + type: integer + nodeMemory: + type: string + osDiskSize: + type: string + osImageURL: + type: string + sshGatewayCPU: + format: int64 + type: integer + sshGatewayMemory: + type: string + storageClassName: + type: string + required: + - cloudInitScriptRef + - dataDiskSize + - nodeCPU + - nodeMemory + - osDiskSize + - osImageURL + - storageClassName + type: object passwordAndUserAuth: type: boolean pciCompliance: diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index ee225136e..167097051 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -32,6 +32,31 @@ rules: - get - list - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - pods + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - "" resources: @@ -39,6 +64,7 @@ rules: verbs: - create - delete + - deletecollection - get - list - patch @@ -51,6 +77,20 @@ rules: verbs: - create - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - cdi.kubevirt.io + resources: + - datavolumes + verbs: + - create + - delete + - deletecollection - get - list - patch @@ -796,3 +836,29 @@ rules: - get - patch - update +- apiGroups: + - kubevirt.io + resources: + - virtualmachineinstances + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - kubevirt.io + resources: + - virtualmachines + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch diff --git a/config/samples/clusters_v1beta1_cassandra.yaml b/config/samples/clusters_v1beta1_cassandra.yaml index b0e1cb1a0..2119a7c3a 100644 --- a/config/samples/clusters_v1beta1_cassandra.yaml +++ b/config/samples/clusters_v1beta1_cassandra.yaml @@ -37,20 +37,19 @@ spec: pciCompliance: false luceneEnabled: false # can be enabled only on 3.11.13 version of Cassandra passwordAndUserAuth: true -# userRefs: -# - namespace: default -# name: cassandrauser-sample -# - namespace: default -# name: cassandrauser-sample2 -# - namespace: default -# name: cassandrauser-sample3 + # userRefs: + # - namespace: default + # name: cassandrauser-sample + # - namespace: default + # name: cassandrauser-sample2 + # - namespace: default + # name: cassandrauser-sample3 slaTier: "NON_PRODUCTION" -# resizeSettings: -# - notifySupportContacts: false -# concurrency: 2 -# description: "this is a sample of description" -# twoFactorDelete: -# - email: "rostyslp@netapp.com" - #spark: - # - version: "2.3.2" # 3.0.1 for 4.0.4 version of Cassandra | 2.3.2 for 3.11.13 version of Cassandra - + # resizeSettings: + # - notifySupportContacts: false + # concurrency: 2 + # description: "this is a sample of description" + # twoFactorDelete: + # - email: "rostyslp@netapp.com" + #spark: + # - version: "2.3.2" # 3.0.1 for 4.0.4 version of Cassandra | 2.3.2 for 3.11.13 version of Cassandra \ No newline at end of file diff --git a/config/samples/onpremises/clusters_v1beta1_cadence.yaml b/config/samples/onpremises/clusters_v1beta1_cadence.yaml new file mode 100644 index 000000000..72b302978 --- /dev/null +++ b/config/samples/onpremises/clusters_v1beta1_cadence.yaml @@ -0,0 
+1,63 @@ +#apiVersion: v1 +#kind: Secret +#metadata: +# name: inst-test-aws-cred-secret +#data: +# awsAccessKeyId: access_key +# awsSecretAccessKey: secret_key +#--- +apiVersion: clusters.instaclustr.com/v1beta1 +kind: Cadence +metadata: + name: cadence-sample +spec: + name: "username-test" + version: "1.0.0" + standardProvisioning: + - targetCassandra: + dependencyCdcId: "9d43ac54-7317-4ce5-859a-e9d0443508a4" + dependencyVpcType: "VPC_PEERED" +# packagedProvisioning: +# - bundledCassandraSpec: +# nodeSize: "CAS-DEV-t4g.small-5" +# network: "10.2.0.0/16" +# replicationFactor: 3 +# nodesNumber: 3 +# privateIPBroadcastForDiscovery: false +# passwordAndUserAuth: true +# useAdvancedVisibility: true +# bundledKafkaSpec: +# nodeSize: "KFK-DEV-t4g.small-5" +# nodesNumber: 3 +# network: "10.3.0.0/16" +# replicationFactor: 3 +# partitionsNumber: 3 +# bundledOpenSearchSpec: +# nodeSize: "SRH-DEV-t4g.small-5" +# replicationFactor: 3 +# network: "10.4.0.0/16" + # twoFactorDelete: + # - email: "rostyslp@netapp.com" + privateNetworkCluster: false + dataCentres: + - region: "US_EAST_2" + network: "10.12.0.0/16" + # if you use multi-region mode please provide + # non-overlapping CIDR block for the secondary mode cluster +# network: "10.16.0.0/16" + cloudProvider: "AWS_VPC" + name: "testdc" +# nodeSize: "CAD-PRD-m5ad.large-75" + nodeSize: "CAD-DEV-t3.small-5" + nodesNumber: 2 + clientEncryption: false +# privateLink: +# - advertisedHostname: "cadence-sample-test.com" + slaTier: "NON_PRODUCTION" + useCadenceWebAuth: false +# targetPrimaryCadence: +# - dependencyCdcId: "cce79be3-7f41-4cad-837c-86d3d8b4be77" +# dependencyVpcType: "SEPARATE_VPC" + resizeSettings: + - notifySupportContacts: false + concurrency: 1 \ No newline at end of file diff --git a/config/samples/onpremises/clusters_v1beta1_cassandra.yaml b/config/samples/onpremises/clusters_v1beta1_cassandra.yaml new file mode 100644 index 000000000..8b7968872 --- /dev/null +++ b/config/samples/onpremises/clusters_v1beta1_cassandra.yaml @@ -0,0 +1,37 @@ +apiVersion: clusters.instaclustr.com/v1beta1 +kind: Cassandra +metadata: + name: cassandra-on-prem-cluster +spec: + name: "danylo-on-prem-cassandra" + version: "4.0.10" + privateNetworkCluster: false + onPremisesSpec: + storageClassName: managed-csi-premium + osDiskSize: 20Gi + dataDiskSize: 200Gi + sshGatewayCPU: 2 + sshGatewayMemory: 4096Mi + nodeCPU: 2 + nodeMemory: 8192Mi + osImageURL: "https://s3.amazonaws.com/debian-bucket/debian-11-generic-amd64-20230601-1398.raw" + cloudInitScriptRef: + namespace: default + name: instaclustr-cloud-init-secret + dataCentres: + - name: "onPremCassandra" + region: "CLIENT_DC" + cloudProvider: "ONPREMISES" + continuousBackup: false + nodesNumber: 2 + replicationFactor: 2 + privateIpBroadcastForDiscovery: false + network: "192.168.0.0/16" + tags: + "onprem": "test" + clientToClusterEncryption: false + nodeSize: "CAS-PRD-OP.4.8-200" + pciCompliance: false + luceneEnabled: false # can be enabled only on 3.11.13 version of Cassandra + passwordAndUserAuth: false + slaTier: "NON_PRODUCTION" diff --git a/config/samples/onpremises/clusters_v1beta1_kafka.yaml b/config/samples/onpremises/clusters_v1beta1_kafka.yaml new file mode 100644 index 000000000..79f5574b6 --- /dev/null +++ b/config/samples/onpremises/clusters_v1beta1_kafka.yaml @@ -0,0 +1,40 @@ +apiVersion: clusters.instaclustr.com/v1beta1 +kind: Kafka +metadata: + name: danylo-kafka +spec: + name: "danylo-kafka" + version: "3.3.1" + pciCompliance: false + replicationFactor: 3 + partitionsNumber: 3 + allowDeleteTopics: 
true + autoCreateTopics: true + clientToClusterEncryption: false + privateNetworkCluster: false + slaTier: "NON_PRODUCTION" + onPremisesSpec: + storageClassName: managed-csi-premium + osDiskSize: 20Gi + dataDiskSize: 200Gi + sshGatewayCPU: 2 + sshGatewayMemory: 4096Mi + nodeCPU: 2 + nodeMemory: 8192Mi + osImageURL: "https://s3.amazonaws.com/debian-bucket/debian-11-generic-amd64-20230601-1398.raw" + cloudInitScriptRef: + namespace: default + name: instaclustr-cloud-init-secret + dataCentres: + - name: "onPremKafka" + nodesNumber: 3 + cloudProvider: "ONPREMISES" + tags: + tag: "oneTag" + tag2: "twoTags" + nodeSize: "KFK-DEV-OP.4.8-200" + network: "10.0.0.0/16" + region: "CLIENT_DC" + resizeSettings: + - notifySupportContacts: false + concurrency: 1 \ No newline at end of file diff --git a/config/samples/onpremises/clusters_v1beta1_kafkaconnect.yaml b/config/samples/onpremises/clusters_v1beta1_kafkaconnect.yaml new file mode 100644 index 000000000..0b6a840a0 --- /dev/null +++ b/config/samples/onpremises/clusters_v1beta1_kafkaconnect.yaml @@ -0,0 +1,25 @@ +apiVersion: clusters.instaclustr.com/v1beta1 +kind: KafkaConnect +metadata: + name: kafkaconnect-sample +spec: + dataCentres: + - name: "US_EAST_1_DC_KAFKA" + nodesNumber: 3 +# nodesNumber: 6 + cloudProvider: "AWS_VPC" + replicationFactor: 3 + tags: + tag: "oneTag" + tag2: "twoTags" + nodeSize: "KCN-DEV-t4g.medium-30" + network: "10.15.0.0/16" + region: "US_EAST_1" + name: "Username-KC" + version: "3.1.2" + privateNetworkCluster: false + slaTier: "NON_PRODUCTION" + targetCluster: + - managedCluster: + - targetKafkaClusterId: "34dfc53c-c8c1-4be8-bd2f-cfdb77ec7349" + kafkaConnectVpcType: "KAFKA_VPC" diff --git a/config/samples/onpremises/clusters_v1beta1_postgresql.yaml b/config/samples/onpremises/clusters_v1beta1_postgresql.yaml new file mode 100644 index 000000000..11c3b5859 --- /dev/null +++ b/config/samples/onpremises/clusters_v1beta1_postgresql.yaml @@ -0,0 +1,52 @@ +apiVersion: clusters.instaclustr.com/v1beta1 +kind: PostgreSQL +metadata: + name: postgresql-sample +# TODO https://github.com/instaclustr/operator/issues/472 +# annotations: +# testAnnotation: test +spec: + name: "username-test" + version: "15.4.0" + dataCentres: + - region: "US_WEST_2" + network: "10.1.0.0/16" + cloudProvider: "AWS_VPC" +# nodeSize: "PGS-DEV-t4g.medium-30" + nodeSize: "PGS-DEV-t4g.small-5" + nodesNumber: 2 + clientEncryption: false + name: "testDC1" + intraDataCentreReplication: + - replicationMode: "SYNCHRONOUS" + interDataCentreReplication: + - isPrimaryDataCentre: true + # - region: "US_WEST_2" + # network: "10.2.0.0/16" + # cloudProvider: "AWS_VPC" + ## nodeSize: "PGS-DEV-t4g.medium-30" + # nodeSize: "PGS-DEV-t4g.small-5" + # racksNumber: 2 + # nodesNumber: 1 + # postgresqlNodeCount: 2 + # clientEncryption: false + # name: "testDC2" + # intraDataCentreReplication: + # - replicationMode: "ASYNCHRONOUS" + # interDataCentreReplication: + # - isPrimaryDataCentre: false + # clusterConfigurations: + # idle_in_transaction_session_timeout: "2" + # statement_timeout: "1" + # twoFactorDelete: + # - email: "rostyslp@netapp.com" + # description: "test 222" + slaTier: "NON_PRODUCTION" +# userRefs: +# - namespace: default +# name: postgresqluser-sample + privateNetworkCluster: false + synchronousModeStrict: false +# resizeSettings: +# - notifySupportContacts: false +# concurrency: 1 \ No newline at end of file diff --git a/config/samples/onpremises/clusters_v1beta1_redis.yaml b/config/samples/onpremises/clusters_v1beta1_redis.yaml new file mode 100644 index 
000000000..d7f28edd2 --- /dev/null +++ b/config/samples/onpremises/clusters_v1beta1_redis.yaml @@ -0,0 +1,32 @@ +apiVersion: clusters.instaclustr.com/v1beta1 +kind: Redis +metadata: + name: danylo-redis +spec: + name: "danylo-redis" + version: "7.0.12" + onPremisesSpec: + storageClassName: managed-csi-premium + osDiskSize: 20Gi + dataDiskSize: 200Gi + sshGatewayCPU: 2 + sshGatewayMemory: 4096Mi + nodeCPU: 2 + nodeMemory: 8192Mi + osImageURL: "https://s3.amazonaws.com/debian-bucket/debian-11-generic-amd64-20230601-1398.raw" + cloudInitScriptRef: + namespace: default + name: instaclustr-cloud-init-secret + slaTier: "NON_PRODUCTION" + clientEncryption: false + passwordAndUserAuth: true + privateNetworkCluster: false + dataCentres: + - region: "CLIENT_DC" + name: "onPremRedis" + cloudProvider: "ONPREMISES" + network: "10.1.0.0/16" + nodeSize: "RDS-PRD-OP.8.64-400" + masterNodes: 3 + nodesNumber: 0 + replicationFactor: 0 \ No newline at end of file diff --git a/controllers/clusterresources/awsendpointserviceprincipal_controller.go b/controllers/clusterresources/awsendpointserviceprincipal_controller.go index c28c11b78..85f58b4b9 100644 --- a/controllers/clusterresources/awsendpointserviceprincipal_controller.go +++ b/controllers/clusterresources/awsendpointserviceprincipal_controller.go @@ -101,7 +101,7 @@ func (r *AWSEndpointServicePrincipalReconciler) handleCreate(ctx context.Context err = json.Unmarshal(b, &principal.Status) if err != nil { l.Error(err, "failed to parse an AWS endpoint service principal resource response from Instaclustr") - r.EventRecorder.Eventf(principal, models.Warning, models.ConvertionFailed, + r.EventRecorder.Eventf(principal, models.Warning, models.ConversionFailed, "Failed to parse an AWS endpoint service principal resource response from Instaclustr. Reason: %v", err, ) diff --git a/controllers/clusterresources/clusterbackup_controller.go b/controllers/clusterresources/clusterbackup_controller.go index aa25574f9..db42d76d2 100644 --- a/controllers/clusterresources/clusterbackup_controller.go +++ b/controllers/clusterresources/clusterbackup_controller.go @@ -172,7 +172,7 @@ func (r *ClusterBackupReconciler) Reconcile(ctx context.Context, req ctrl.Reques ) r.EventRecorder.Eventf( - backup, models.Warning, models.ConvertionFailed, + backup, models.Warning, models.ConversionFailed, "Start timestamp annotation convertion to int is failed. 
Reason: %v", err, ) diff --git a/controllers/clusters/cadence_controller.go b/controllers/clusters/cadence_controller.go index a22f993dd..ce4b61f91 100644 --- a/controllers/clusters/cadence_controller.go +++ b/controllers/clusters/cadence_controller.go @@ -35,6 +35,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/instaclustr/operator/apis/clusters/v1beta1" "github.com/instaclustr/operator/pkg/exposeservice" @@ -56,8 +57,15 @@ type CadenceReconciler struct { //+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=cadences,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=cadences/status,verbs=get;update;patch //+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=cadences/finalizers,verbs=update -//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;patch //+kubebuilder:rbac:groups="",resources=events,verbs=create +//+kubebuilder:rbac:groups="",resources=endpoints,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=cdi.kubevirt.io,resources=datavolumes,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachines,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachineinstances,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete;deletecollection // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. 
@@ -65,92 +73,92 @@ type CadenceReconciler struct { // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.0/pkg/reconcile func (r *CadenceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - logger := log.FromContext(ctx) + l := log.FromContext(ctx) - cadenceCluster := &v1beta1.Cadence{} - err := r.Client.Get(ctx, req.NamespacedName, cadenceCluster) + c := &v1beta1.Cadence{} + err := r.Client.Get(ctx, req.NamespacedName, c) if err != nil { if k8serrors.IsNotFound(err) { - logger.Info("Cadence resource is not found", + l.Info("Cadence resource is not found", "resource name", req.NamespacedName, ) return ctrl.Result{}, nil } - logger.Error(err, "Unable to fetch Cadence resource", + l.Error(err, "Unable to fetch Cadence resource", "resource name", req.NamespacedName, ) return ctrl.Result{}, err } - switch cadenceCluster.Annotations[models.ResourceStateAnnotation] { + switch c.Annotations[models.ResourceStateAnnotation] { case models.CreatingEvent: - return r.HandleCreateCluster(ctx, cadenceCluster, logger) + return r.handleCreateCluster(ctx, c, l) case models.UpdatingEvent: - return r.HandleUpdateCluster(ctx, cadenceCluster, logger) + return r.handleUpdateCluster(ctx, c, l) case models.DeletingEvent: - return r.HandleDeleteCluster(ctx, cadenceCluster, logger) + return r.handleDeleteCluster(ctx, c, l) case models.GenericEvent: - logger.Info("Generic event isn't handled", + l.Info("Generic event isn't handled", "request", req, - "event", cadenceCluster.Annotations[models.ResourceStateAnnotation], + "event", c.Annotations[models.ResourceStateAnnotation], ) return ctrl.Result{}, nil default: - logger.Info("Unknown event isn't handled", + l.Info("Unknown event isn't handled", "request", req, - "event", cadenceCluster.Annotations[models.ResourceStateAnnotation], + "event", c.Annotations[models.ResourceStateAnnotation], ) return ctrl.Result{}, nil } } -func (r *CadenceReconciler) HandleCreateCluster( +func (r *CadenceReconciler) handleCreateCluster( ctx context.Context, - cadence *v1beta1.Cadence, - logger logr.Logger, + c *v1beta1.Cadence, + l logr.Logger, ) (ctrl.Result, error) { - if cadence.Status.ID == "" { - patch := cadence.NewPatch() + if c.Status.ID == "" { + patch := c.NewPatch() - for _, packagedProvisioning := range cadence.Spec.PackagedProvisioning { - requeueNeeded, err := r.preparePackagedSolution(ctx, cadence, packagedProvisioning) + for _, packagedProvisioning := range c.Spec.PackagedProvisioning { + requeueNeeded, err := r.preparePackagedSolution(ctx, c, packagedProvisioning) if err != nil { - logger.Error(err, "Cannot prepare packaged solution for Cadence cluster", - "cluster name", cadence.Spec.Name, + l.Error(err, "Cannot prepare packaged solution for Cadence cluster", + "cluster name", c.Spec.Name, ) - r.EventRecorder.Eventf(cadence, models.Warning, models.CreationFailed, + r.EventRecorder.Eventf(c, models.Warning, models.CreationFailed, "Cannot prepare packaged solution for Cadence cluster. 
Reason: %v", err) return ctrl.Result{}, err } if requeueNeeded { - logger.Info("Waiting for bundled clusters to be created", - "cadence cluster name", cadence.Spec.Name) + l.Info("Waiting for bundled clusters to be created", + "c cluster name", c.Spec.Name) - r.EventRecorder.Event(cadence, models.Normal, "Waiting", + r.EventRecorder.Event(c, models.Normal, "Waiting", "Waiting for bundled clusters to be created") return models.ReconcileRequeue, nil } } - logger.Info( + l.Info( "Creating Cadence cluster", - "cluster name", cadence.Spec.Name, - "data centres", cadence.Spec.DataCentres, + "cluster name", c.Spec.Name, + "data centres", c.Spec.DataCentres, ) - cadenceAPISpec, err := cadence.Spec.ToInstAPI(ctx, r.Client) + cadenceAPISpec, err := c.Spec.ToInstAPI(ctx, r.Client) if err != nil { - logger.Error(err, "Cannot convert Cadence cluster manifest to API spec", - "cluster manifest", cadence.Spec) + l.Error(err, "Cannot convert Cadence cluster manifest to API spec", + "cluster manifest", c.Spec) - r.EventRecorder.Eventf(cadence, models.Warning, models.ConvertionFailed, + r.EventRecorder.Eventf(c, models.Warning, models.ConversionFailed, "Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v", err) return ctrl.Result{}, err @@ -158,141 +166,218 @@ func (r *CadenceReconciler) HandleCreateCluster( id, err := r.API.CreateCluster(instaclustr.CadenceEndpoint, cadenceAPISpec) if err != nil { - logger.Error( + l.Error( err, "Cannot create Cadence cluster", - "cadence manifest", cadence.Spec, + "c manifest", c.Spec, ) - r.EventRecorder.Eventf(cadence, models.Warning, models.CreationFailed, + r.EventRecorder.Eventf(c, models.Warning, models.CreationFailed, "Cluster creation on the Instaclustr is failed. Reason: %v", err) return ctrl.Result{}, err } - cadence.Status.ID = id - err = r.Status().Patch(ctx, cadence, patch) + c.Status.ID = id + err = r.Status().Patch(ctx, c, patch) if err != nil { - logger.Error(err, "Cannot update Cadence cluster status", - "cluster name", cadence.Spec.Name, - "cluster status", cadence.Status, + l.Error(err, "Cannot update Cadence cluster status", + "cluster name", c.Spec.Name, + "cluster status", c.Status, ) - r.EventRecorder.Eventf(cadence, models.Warning, models.PatchFailed, + r.EventRecorder.Eventf(c, models.Warning, models.PatchFailed, "Cluster resource status patch is failed. Reason: %v", err) return ctrl.Result{}, err } - if cadence.Spec.Description != "" { - err = r.API.UpdateDescriptionAndTwoFactorDelete(instaclustr.ClustersEndpointV1, id, cadence.Spec.Description, nil) + if c.Spec.Description != "" { + err = r.API.UpdateDescriptionAndTwoFactorDelete(instaclustr.ClustersEndpointV1, id, c.Spec.Description, nil) if err != nil { - logger.Error(err, "Cannot update Cadence cluster description and TwoFactorDelete", - "cluster name", cadence.Spec.Name, - "description", cadence.Spec.Description, - "twoFactorDelete", cadence.Spec.TwoFactorDelete, + l.Error(err, "Cannot update Cadence cluster description and TwoFactorDelete", + "cluster name", c.Spec.Name, + "description", c.Spec.Description, + "twoFactorDelete", c.Spec.TwoFactorDelete, ) - r.EventRecorder.Eventf(cadence, models.Warning, models.CreationFailed, + r.EventRecorder.Eventf(c, models.Warning, models.CreationFailed, "Cluster description and TwoFactoDelete update is failed. 
Reason: %v", err) } } - cadence.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent - controllerutil.AddFinalizer(cadence, models.DeletionFinalizer) + c.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent + controllerutil.AddFinalizer(c, models.DeletionFinalizer) - err = r.Patch(ctx, cadence, patch) + err = r.Patch(ctx, c, patch) if err != nil { - logger.Error(err, "Cannot patch Cadence cluster", - "cluster name", cadence.Spec.Name, "patch", patch) + l.Error(err, "Cannot patch Cadence cluster", + "cluster name", c.Spec.Name, "patch", patch) - r.EventRecorder.Eventf(cadence, models.Warning, models.PatchFailed, + r.EventRecorder.Eventf(c, models.Warning, models.PatchFailed, "Cluster resource status patch is failed. Reason: %v", err) return ctrl.Result{}, err } - logger.Info( + l.Info( "Cadence resource has been created", - "cluster name", cadence.Name, - "cluster ID", cadence.Status.ID, - "kind", cadence.Kind, - "api version", cadence.APIVersion, - "namespace", cadence.Namespace, + "cluster name", c.Name, + "cluster ID", c.Status.ID, + "kind", c.Kind, + "api version", c.APIVersion, + "namespace", c.Namespace, ) - r.EventRecorder.Eventf(cadence, models.Normal, models.Created, + r.EventRecorder.Eventf(c, models.Normal, models.Created, "Cluster creation request is sent. Cluster ID: %s", id) } - if cadence.Status.State != models.DeletedStatus { - err := r.startClusterStatusJob(cadence) + if c.Status.State != models.DeletedStatus { + err := r.startClusterStatusJob(c) if err != nil { - logger.Error(err, "Cannot start cluster status job", - "cadence cluster ID", cadence.Status.ID, + l.Error(err, "Cannot start cluster status job", + "c cluster ID", c.Status.ID, ) - r.EventRecorder.Eventf(cadence, models.Warning, models.CreationFailed, + r.EventRecorder.Eventf(c, models.Warning, models.CreationFailed, "Cluster status check job is failed. Reason: %v", err) return ctrl.Result{}, err } - r.EventRecorder.Event(cadence, models.Normal, models.Created, + r.EventRecorder.Event(c, models.Normal, models.Created, "Cluster status check job is started") } + if c.Spec.OnPremisesSpec != nil { + iData, err := r.API.GetCadence(c.Status.ID) + if err != nil { + l.Error(err, "Cannot get cluster from the Instaclustr API", + "cluster name", c.Spec.Name, + "data centres", c.Spec.DataCentres, + "cluster ID", c.Status.ID, + ) + r.EventRecorder.Eventf( + c, models.Warning, models.FetchFailed, + "Cluster fetch from the Instaclustr API is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + iCadence, err := c.FromInstAPI(iData) + if err != nil { + l.Error( + err, "Cannot convert cluster from the Instaclustr API", + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, + ) + r.EventRecorder.Eventf( + c, models.Warning, models.ConversionFailed, + "Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + bootstrap := newOnPremisesBootstrap( + r.Client, + c, + r.EventRecorder, + iCadence.Status.ClusterStatus, + c.Spec.OnPremisesSpec, + newExposePorts(c.GetExposePorts()), + c.GetHeadlessPorts(), + c.Spec.PrivateNetworkCluster, + ) + + err = handleCreateOnPremisesClusterResources(ctx, bootstrap) + if err != nil { + l.Error( + err, "Cannot create resources for on-premises cluster", + "cluster spec", c.Spec.OnPremisesSpec, + ) + r.EventRecorder.Eventf( + c, models.Warning, models.CreationFailed, + "Resources creation for on-premises cluster is failed. 
Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + err = r.startClusterOnPremisesIPsJob(c, bootstrap) + if err != nil { + l.Error(err, "Cannot start on-premises cluster IPs check job", + "cluster ID", c.Status.ID, + ) + + r.EventRecorder.Eventf( + c, models.Warning, models.CreationFailed, + "On-premises cluster IPs check job is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + l.Info( + "On-premises resources have been created", + "cluster name", c.Spec.Name, + "on-premises Spec", c.Spec.OnPremisesSpec, + "cluster ID", c.Status.ID, + ) + return models.ExitReconcile, nil + } return ctrl.Result{}, nil } -func (r *CadenceReconciler) HandleUpdateCluster( +func (r *CadenceReconciler) handleUpdateCluster( ctx context.Context, - cadence *v1beta1.Cadence, - logger logr.Logger, + c *v1beta1.Cadence, + l logr.Logger, ) (ctrl.Result, error) { - iData, err := r.API.GetCadence(cadence.Status.ID) + iData, err := r.API.GetCadence(c.Status.ID) if err != nil { - logger.Error( + l.Error( err, "Cannot get Cadence cluster from the Instaclustr API", - "cluster name", cadence.Spec.Name, - "cluster ID", cadence.Status.ID, + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, ) - r.EventRecorder.Eventf(cadence, models.Warning, models.FetchFailed, + r.EventRecorder.Eventf(c, models.Warning, models.FetchFailed, "Cluster fetch from the Instaclustr API is failed. Reason: %v", err) return ctrl.Result{}, err } - iCadence, err := cadence.FromInstAPI(iData) + iCadence, err := c.FromInstAPI(iData) if err != nil { - logger.Error( + l.Error( err, "Cannot convert Cadence cluster from the Instaclustr API", - "cluster name", cadence.Spec.Name, - "cluster ID", cadence.Status.ID, + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, ) - r.EventRecorder.Eventf(cadence, models.Warning, models.ConvertionFailed, + r.EventRecorder.Eventf(c, models.Warning, models.ConversionFailed, "Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v", err) return ctrl.Result{}, err } if iCadence.Status.CurrentClusterOperationStatus != models.NoOperation { - logger.Info("Cadence cluster is not ready to update", + l.Info("Cadence cluster is not ready to update", "cluster name", iCadence.Spec.Name, "cluster state", iCadence.Status.State, "current operation status", iCadence.Status.CurrentClusterOperationStatus, ) - patch := cadence.NewPatch() - cadence.Annotations[models.ResourceStateAnnotation] = models.UpdatingEvent - cadence.Annotations[models.UpdateQueuedAnnotation] = models.True - err = r.Patch(ctx, cadence, patch) + patch := c.NewPatch() + c.Annotations[models.ResourceStateAnnotation] = models.UpdatingEvent + c.Annotations[models.UpdateQueuedAnnotation] = models.True + err = r.Patch(ctx, c, patch) if err != nil { - logger.Error(err, "Cannot patch Cadence cluster", - "cluster name", cadence.Spec.Name, + l.Error(err, "Cannot patch Cadence cluster", + "cluster name", c.Spec.Name, "patch", patch) - r.EventRecorder.Eventf(cadence, models.Warning, models.PatchFailed, + r.EventRecorder.Eventf(c, models.Warning, models.PatchFailed, "Cluster resource patch is failed. 
Reason: %v", err) return ctrl.Result{}, err @@ -301,236 +386,271 @@ func (r *CadenceReconciler) HandleUpdateCluster( return models.ReconcileRequeue, nil } - if cadence.Annotations[models.ExternalChangesAnnotation] == models.True { - return r.handleExternalChanges(cadence, iCadence, logger) + if c.Annotations[models.ExternalChangesAnnotation] == models.True { + return r.handleExternalChanges(c, iCadence, l) } - if cadence.Spec.ClusterSettingsNeedUpdate(iCadence.Spec.Cluster) { - logger.Info("Updating cluster settings", + if c.Spec.ClusterSettingsNeedUpdate(iCadence.Spec.Cluster) { + l.Info("Updating cluster settings", "instaclustr description", iCadence.Spec.Description, "instaclustr two factor delete", iCadence.Spec.TwoFactorDelete) - err = r.API.UpdateClusterSettings(cadence.Status.ID, cadence.Spec.ClusterSettingsUpdateToInstAPI()) + err = r.API.UpdateClusterSettings(c.Status.ID, c.Spec.ClusterSettingsUpdateToInstAPI()) if err != nil { - logger.Error(err, "Cannot update cluster settings", - "cluster ID", cadence.Status.ID, "cluster spec", cadence.Spec) - r.EventRecorder.Eventf(cadence, models.Warning, models.UpdateFailed, + l.Error(err, "Cannot update cluster settings", + "cluster ID", c.Status.ID, "cluster spec", c.Spec) + r.EventRecorder.Eventf(c, models.Warning, models.UpdateFailed, "Cannot update cluster settings. Reason: %v", err) return ctrl.Result{}, err } } - logger.Info("Update request to Instaclustr API has been sent", - "spec data centres", cadence.Spec.DataCentres, - "resize settings", cadence.Spec.ResizeSettings, + l.Info("Update request to Instaclustr API has been sent", + "spec data centres", c.Spec.DataCentres, + "resize settings", c.Spec.ResizeSettings, ) - err = r.API.UpdateCluster(cadence.Status.ID, instaclustr.CadenceEndpoint, cadence.Spec.NewDCsUpdate()) + err = r.API.UpdateCluster(c.Status.ID, instaclustr.CadenceEndpoint, c.Spec.NewDCsUpdate()) if err != nil { - logger.Error(err, "Cannot update Cadence cluster", - "cluster ID", cadence.Status.ID, - "update request", cadence.Spec.NewDCsUpdate(), + l.Error(err, "Cannot update Cadence cluster", + "cluster ID", c.Status.ID, + "update request", c.Spec.NewDCsUpdate(), ) - r.EventRecorder.Eventf(cadence, models.Warning, models.UpdateFailed, + r.EventRecorder.Eventf(c, models.Warning, models.UpdateFailed, "Cluster update on the Instaclustr API is failed. Reason: %v", err) return ctrl.Result{}, err } - patch := cadence.NewPatch() - cadence.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent - cadence.Annotations[models.UpdateQueuedAnnotation] = "" - err = r.Patch(ctx, cadence, patch) + patch := c.NewPatch() + c.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent + c.Annotations[models.UpdateQueuedAnnotation] = "" + err = r.Patch(ctx, c, patch) if err != nil { - logger.Error(err, "Cannot patch Cadence cluster", - "cluster name", cadence.Spec.Name, + l.Error(err, "Cannot patch Cadence cluster", + "cluster name", c.Spec.Name, "patch", patch) - r.EventRecorder.Eventf(cadence, models.Warning, models.PatchFailed, + r.EventRecorder.Eventf(c, models.Warning, models.PatchFailed, "Cluster resource patch is failed. 
Reason: %v", err) return ctrl.Result{}, err } - logger.Info( + l.Info( "Cluster has been updated", - "cluster name", cadence.Spec.Name, - "cluster ID", cadence.Status.ID, - "data centres", cadence.Spec.DataCentres, + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, + "data centres", c.Spec.DataCentres, ) return ctrl.Result{}, nil } -func (r *CadenceReconciler) handleExternalChanges(cadence, iCadence *v1beta1.Cadence, l logr.Logger) (ctrl.Result, error) { - if !cadence.Spec.AreDCsEqual(iCadence.Spec.DataCentres) { +func (r *CadenceReconciler) handleExternalChanges(c, iCadence *v1beta1.Cadence, l logr.Logger) (ctrl.Result, error) { + if !c.Spec.AreDCsEqual(iCadence.Spec.DataCentres) { l.Info(msgExternalChanges, "instaclustr data", iCadence.Spec.DataCentres, - "k8s resource spec", cadence.Spec.DataCentres) + "k8s resource spec", c.Spec.DataCentres) - msgDiffSpecs, err := createSpecDifferenceMessage(cadence.Spec.DataCentres, iCadence.Spec.DataCentres) + msgDiffSpecs, err := createSpecDifferenceMessage(c.Spec.DataCentres, iCadence.Spec.DataCentres) if err != nil { l.Error(err, "Cannot create specification difference message", - "instaclustr data", iCadence.Spec, "k8s resource spec", cadence.Spec) + "instaclustr data", iCadence.Spec, "k8s resource spec", c.Spec) return ctrl.Result{}, err } - r.EventRecorder.Eventf(cadence, models.Warning, models.ExternalChanges, msgDiffSpecs) + r.EventRecorder.Eventf(c, models.Warning, models.ExternalChanges, msgDiffSpecs) return ctrl.Result{}, nil } - patch := cadence.NewPatch() + patch := c.NewPatch() - cadence.Annotations[models.ExternalChangesAnnotation] = "" + c.Annotations[models.ExternalChangesAnnotation] = "" - err := r.Patch(context.Background(), cadence, patch) + err := r.Patch(context.Background(), c, patch) if err != nil { l.Error(err, "Cannot patch cluster resource", - "cluster name", cadence.Spec.Name, "cluster ID", cadence.Status.ID) + "cluster name", c.Spec.Name, "cluster ID", c.Status.ID) - r.EventRecorder.Eventf(cadence, models.Warning, models.PatchFailed, + r.EventRecorder.Eventf(c, models.Warning, models.PatchFailed, "Cluster resource patch is failed. Reason: %v", err) return ctrl.Result{}, err } - l.Info("External changes have been reconciled", "resource ID", cadence.Status.ID) - r.EventRecorder.Event(cadence, models.Normal, models.ExternalChanges, "External changes have been reconciled") + l.Info("External changes have been reconciled", "resource ID", c.Status.ID) + r.EventRecorder.Event(c, models.Normal, models.ExternalChanges, "External changes have been reconciled") return ctrl.Result{}, nil } -func (r *CadenceReconciler) HandleDeleteCluster( +func (r *CadenceReconciler) handleDeleteCluster( ctx context.Context, - cadence *v1beta1.Cadence, - logger logr.Logger, + c *v1beta1.Cadence, + l logr.Logger, ) (ctrl.Result, error) { - _, err := r.API.GetCadence(cadence.Status.ID) + _, err := r.API.GetCadence(c.Status.ID) if err != nil && !errors.Is(err, instaclustr.NotFound) { - logger.Error( + l.Error( err, "Cannot get Cadence cluster status from the Instaclustr API", - "cluster name", cadence.Spec.Name, - "cluster ID", cadence.Status.ID, + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, ) - r.EventRecorder.Eventf(cadence, models.Warning, models.FetchFailed, + r.EventRecorder.Eventf(c, models.Warning, models.FetchFailed, "Cluster resource fetch from the Instaclustr API is failed. 
Reason: %v", err) return ctrl.Result{}, err } if !errors.Is(err, instaclustr.NotFound) { - logger.Info("Sending cluster deletion to the Instaclustr API", - "cluster name", cadence.Spec.Name, - "cluster ID", cadence.Status.ID) + l.Info("Sending cluster deletion to the Instaclustr API", + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID) - err = r.API.DeleteCluster(cadence.Status.ID, instaclustr.CadenceEndpoint) + err = r.API.DeleteCluster(c.Status.ID, instaclustr.CadenceEndpoint) if err != nil { - logger.Error(err, "Cannot delete Cadence cluster", - "cluster name", cadence.Spec.Name, - "cluster status", cadence.Status, + l.Error(err, "Cannot delete Cadence cluster", + "cluster name", c.Spec.Name, + "cluster status", c.Status, ) - r.EventRecorder.Eventf(cadence, models.Warning, models.DeletionFailed, + r.EventRecorder.Eventf(c, models.Warning, models.DeletionFailed, "Cluster deletion is failed on the Instaclustr. Reason: %v", err) return ctrl.Result{}, err } - r.EventRecorder.Event(cadence, models.Normal, models.DeletionStarted, + r.EventRecorder.Event(c, models.Normal, models.DeletionStarted, "Cluster deletion request is sent to the Instaclustr API.") - if cadence.Spec.TwoFactorDelete != nil { - patch := cadence.NewPatch() + if c.Spec.TwoFactorDelete != nil { + patch := c.NewPatch() - cadence.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent - cadence.Annotations[models.ClusterDeletionAnnotation] = models.Triggered - err = r.Patch(ctx, cadence, patch) + c.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent + c.Annotations[models.ClusterDeletionAnnotation] = models.Triggered + err = r.Patch(ctx, c, patch) if err != nil { - logger.Error(err, "Cannot patch cluster resource", - "cluster name", cadence.Spec.Name, - "cluster state", cadence.Status.State) - r.EventRecorder.Eventf(cadence, models.Warning, models.PatchFailed, + l.Error(err, "Cannot patch cluster resource", + "cluster name", c.Spec.Name, + "cluster state", c.Status.State) + r.EventRecorder.Eventf(c, models.Warning, models.PatchFailed, "Cluster resource patch is failed. Reason: %v", err) return ctrl.Result{}, err } - logger.Info(msgDeleteClusterWithTwoFactorDelete, "cluster ID", cadence.Status.ID) + l.Info(msgDeleteClusterWithTwoFactorDelete, "cluster ID", c.Status.ID) - r.EventRecorder.Event(cadence, models.Normal, models.DeletionStarted, + r.EventRecorder.Event(c, models.Normal, models.DeletionStarted, "Two-Factor Delete is enabled, please confirm cluster deletion via email or phone.") - return ctrl.Result{}, nil } + if c.Spec.OnPremisesSpec != nil { + err = deleteOnPremResources(ctx, r.Client, c.Status.ID, c.Namespace) + if err != nil { + l.Error(err, "Cannot delete cluster on-premises resources", + "cluster ID", c.Status.ID) + r.EventRecorder.Eventf(c, models.Warning, models.DeletionFailed, + "Cluster on-premises resources deletion is failed. 
Reason: %v", err) + return reconcile.Result{}, err + } + + l.Info("Cluster on-premises resources are deleted", + "cluster ID", c.Status.ID) + r.EventRecorder.Eventf(c, models.Normal, models.Deleted, + "Cluster on-premises resources are deleted") + + patch := c.NewPatch() + controllerutil.RemoveFinalizer(c, models.DeletionFinalizer) + + err = r.Patch(ctx, c, patch) + if err != nil { + l.Error(err, "Cannot patch cluster resource", + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, + "kind", c.Kind, + "api Version", c.APIVersion, + "namespace", c.Namespace, + "cluster metadata", c.ObjectMeta, + ) + r.EventRecorder.Eventf(c, models.Warning, models.PatchFailed, + "Cluster resource patch is failed. Reason: %v", err) + return reconcile.Result{}, err + } + + return reconcile.Result{}, err + } + r.Scheduler.RemoveJob(c.GetJobID(scheduler.OnPremisesIPsChecker)) } - logger.Info("Cadence cluster is being deleted", - "cluster name", cadence.Spec.Name, - "cluster status", cadence.Status) + l.Info("Cadence cluster is being deleted", + "cluster name", c.Spec.Name, + "cluster status", c.Status) - for _, packagedProvisioning := range cadence.Spec.PackagedProvisioning { - err = r.deletePackagedResources(ctx, cadence, packagedProvisioning) + for _, packagedProvisioning := range c.Spec.PackagedProvisioning { + err = r.deletePackagedResources(ctx, c, packagedProvisioning) if err != nil { - logger.Error( + l.Error( err, "Cannot delete Cadence packaged resources", - "cluster name", cadence.Spec.Name, - "cluster ID", cadence.Status.ID, + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, ) - r.EventRecorder.Eventf(cadence, models.Warning, models.DeletionFailed, + r.EventRecorder.Eventf(c, models.Warning, models.DeletionFailed, "Cannot delete Cadence packaged resources. 
Reason: %v", err) return ctrl.Result{}, err } } - r.Scheduler.RemoveJob(cadence.GetJobID(scheduler.StatusChecker)) - patch := cadence.NewPatch() - controllerutil.RemoveFinalizer(cadence, models.DeletionFinalizer) - cadence.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent + r.Scheduler.RemoveJob(c.GetJobID(scheduler.StatusChecker)) + patch := c.NewPatch() + controllerutil.RemoveFinalizer(c, models.DeletionFinalizer) + c.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent - err = r.Patch(ctx, cadence, patch) + err = r.Patch(ctx, c, patch) if err != nil { - logger.Error(err, "Cannot patch Cadence cluster", - "cluster name", cadence.Spec.Name, + l.Error(err, "Cannot patch Cadence cluster", + "cluster name", c.Spec.Name, "patch", patch, ) return ctrl.Result{}, err } - err = exposeservice.Delete(r.Client, cadence.Name, cadence.Namespace) + err = exposeservice.Delete(r.Client, c.Name, c.Namespace) if err != nil { - logger.Error(err, "Cannot delete Cadence cluster expose service", - "cluster ID", cadence.Status.ID, - "cluster name", cadence.Spec.Name, + l.Error(err, "Cannot delete Cadence cluster expose service", + "cluster ID", c.Status.ID, + "cluster name", c.Spec.Name, ) return ctrl.Result{}, err } - logger.Info("Cadence cluster was deleted", - "cluster name", cadence.Spec.Name, - "cluster ID", cadence.Status.ID, + l.Info("Cadence cluster was deleted", + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, ) - r.EventRecorder.Event(cadence, models.Normal, models.Deleted, "Cluster resource is deleted") + r.EventRecorder.Event(c, models.Normal, models.Deleted, "Cluster resource is deleted") return ctrl.Result{}, nil } func (r *CadenceReconciler) preparePackagedSolution( ctx context.Context, - cluster *v1beta1.Cadence, + c *v1beta1.Cadence, packagedProvisioning *v1beta1.PackagedProvisioning, ) (bool, error) { - if len(cluster.Spec.DataCentres) < 1 { + if len(c.Spec.DataCentres) < 1 { return false, models.ErrZeroDataCentres } - labelsToQuery := fmt.Sprintf("%s=%s", models.ControlledByLabel, cluster.Name) + labelsToQuery := fmt.Sprintf("%s=%s", models.ControlledByLabel, c.Name) selector, err := labels.Parse(labelsToQuery) if err != nil { return false, err @@ -555,7 +675,7 @@ func (r *CadenceReconciler) preparePackagedSolution( models.CassandraAppKind) } - cassandraSpec, err := r.newCassandraSpec(cluster, cassandraVersions[len(cassandraVersions)-1].String()) + cassandraSpec, err := r.newCassandraSpec(c, cassandraVersions[len(cassandraVersions)-1].String()) if err != nil { return false, err } @@ -593,7 +713,7 @@ func (r *CadenceReconciler) preparePackagedSolution( models.KafkaAppType) } - kafkaSpec, err := r.newKafkaSpec(cluster, kafkaVersions[len(kafkaVersions)-1].String()) + kafkaSpec, err := r.newKafkaSpec(c, kafkaVersions[len(kafkaVersions)-1].String()) if err != nil { return false, err } @@ -631,7 +751,7 @@ func (r *CadenceReconciler) preparePackagedSolution( } // For OpenSearch we cannot use the latest version because is not supported by Cadence. So we use the oldest one. 
- osSpec, err := r.newOpenSearchSpec(cluster, openSearchVersions[0].String()) + osSpec, err := r.newOpenSearchSpec(c, openSearchVersions[0].String()) if err != nil { return false, err } @@ -657,7 +777,7 @@ func (r *CadenceReconciler) preparePackagedSolution( return true, nil } - cluster.Spec.StandardProvisioning = append(cluster.Spec.StandardProvisioning, &v1beta1.StandardProvisioning{ + c.Spec.StandardProvisioning = append(c.Spec.StandardProvisioning, &v1beta1.StandardProvisioning{ AdvancedVisibility: advancedVisibilities, TargetCassandra: &v1beta1.TargetCassandra{ DependencyCDCID: cassandraList.Items[0].Status.DataCentres[0].ID, @@ -668,42 +788,42 @@ func (r *CadenceReconciler) preparePackagedSolution( return false, nil } -func (r *CadenceReconciler) newCassandraSpec(cadence *v1beta1.Cadence, latestCassandraVersion string) (*v1beta1.Cassandra, error) { +func (r *CadenceReconciler) newCassandraSpec(c *v1beta1.Cadence, latestCassandraVersion string) (*v1beta1.Cassandra, error) { typeMeta := v1.TypeMeta{ Kind: models.CassandraKind, APIVersion: models.ClustersV1beta1APIVersion, } metadata := v1.ObjectMeta{ - Name: models.CassandraChildPrefix + cadence.Name, - Labels: map[string]string{models.ControlledByLabel: cadence.Name}, + Name: models.CassandraChildPrefix + c.Name, + Labels: map[string]string{models.ControlledByLabel: c.Name}, Annotations: map[string]string{models.ResourceStateAnnotation: models.CreatingEvent}, - Namespace: cadence.ObjectMeta.Namespace, + Namespace: c.ObjectMeta.Namespace, Finalizers: []string{}, } - if len(cadence.Spec.DataCentres) == 0 { + if len(c.Spec.DataCentres) == 0 { return nil, models.ErrZeroDataCentres } - slaTier := cadence.Spec.SLATier - privateClusterNetwork := cadence.Spec.PrivateNetworkCluster - pciCompliance := cadence.Spec.PCICompliance + slaTier := c.Spec.SLATier + privateClusterNetwork := c.Spec.PrivateNetworkCluster + pciCompliance := c.Spec.PCICompliance var twoFactorDelete []*v1beta1.TwoFactorDelete - if len(cadence.Spec.TwoFactorDelete) > 0 { + if len(c.Spec.TwoFactorDelete) > 0 { twoFactorDelete = []*v1beta1.TwoFactorDelete{ { - Email: cadence.Spec.TwoFactorDelete[0].Email, - Phone: cadence.Spec.TwoFactorDelete[0].Phone, + Email: c.Spec.TwoFactorDelete[0].Email, + Phone: c.Spec.TwoFactorDelete[0].Phone, }, } } var cassNodeSize, network string var cassNodesNumber, cassReplicationFactor int var cassPrivateIPBroadcastForDiscovery, cassPasswordAndUserAuth bool - for _, dc := range cadence.Spec.DataCentres { - for _, pp := range cadence.Spec.PackagedProvisioning { + for _, dc := range c.Spec.DataCentres { + for _, pp := range c.Spec.PackagedProvisioning { cassNodeSize = pp.BundledCassandraSpec.NodeSize network = pp.BundledCassandraSpec.Network cassNodesNumber = pp.BundledCassandraSpec.NodesNumber @@ -722,9 +842,9 @@ func (r *CadenceReconciler) newCassandraSpec(cadence *v1beta1.Cadence, latestCas } dcName := models.CassandraChildDCName - dcRegion := cadence.Spec.DataCentres[0].Region - cloudProvider := cadence.Spec.DataCentres[0].CloudProvider - providerAccountName := cadence.Spec.DataCentres[0].ProviderAccountName + dcRegion := c.Spec.DataCentres[0].Region + cloudProvider := c.Spec.DataCentres[0].CloudProvider + providerAccountName := c.Spec.DataCentres[0].ProviderAccountName cassandraDataCentres := []*v1beta1.CassandraDataCentre{ { @@ -743,7 +863,7 @@ func (r *CadenceReconciler) newCassandraSpec(cadence *v1beta1.Cadence, latestCas } spec := v1beta1.CassandraSpec{ Cluster: v1beta1.Cluster{ - Name: models.CassandraChildPrefix + cadence.Name, + Name: 
models.CassandraChildPrefix + c.Name, Version: latestCassandraVersion, SLATier: slaTier, PrivateNetworkCluster: privateClusterNetwork, @@ -762,10 +882,21 @@ func (r *CadenceReconciler) newCassandraSpec(cadence *v1beta1.Cadence, latestCas }, nil } -func (r *CadenceReconciler) startClusterStatusJob(cadence *v1beta1.Cadence) error { - job := r.newWatchStatusJob(cadence) +func (r *CadenceReconciler) startClusterOnPremisesIPsJob(c *v1beta1.Cadence, b *onPremisesBootstrap) error { + job := newWatchOnPremisesIPsJob(c.Kind, b) + + err := r.Scheduler.ScheduleJob(c.GetJobID(scheduler.OnPremisesIPsChecker), scheduler.ClusterStatusInterval, job) + if err != nil { + return err + } + + return nil +} + +func (r *CadenceReconciler) startClusterStatusJob(c *v1beta1.Cadence) error { + job := r.newWatchStatusJob(c) - err := r.Scheduler.ScheduleJob(cadence.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(c.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -773,66 +904,66 @@ func (r *CadenceReconciler) startClusterStatusJob(cadence *v1beta1.Cadence) erro return nil } -func (r *CadenceReconciler) newWatchStatusJob(cadence *v1beta1.Cadence) scheduler.Job { +func (r *CadenceReconciler) newWatchStatusJob(c *v1beta1.Cadence) scheduler.Job { l := log.Log.WithValues("component", "cadenceStatusClusterJob") return func() error { - namespacedName := client.ObjectKeyFromObject(cadence) - err := r.Get(context.Background(), namespacedName, cadence) + namespacedName := client.ObjectKeyFromObject(c) + err := r.Get(context.Background(), namespacedName, c) if k8serrors.IsNotFound(err) { l.Info("Resource is not found in the k8s cluster. Closing Instaclustr status sync.", "namespaced name", namespacedName) - r.Scheduler.RemoveJob(cadence.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(c.GetJobID(scheduler.StatusChecker)) return nil } if err != nil { l.Error(err, "Cannot get Cadence custom resource", - "resource name", cadence.Name, + "resource name", c.Name, ) return err } - iData, err := r.API.GetCadence(cadence.Status.ID) + iData, err := r.API.GetCadence(c.Status.ID) if err != nil { if errors.Is(err, instaclustr.NotFound) { - if cadence.DeletionTimestamp != nil { - _, err = r.HandleDeleteCluster(context.Background(), cadence, l) + if c.DeletionTimestamp != nil { + _, err = r.handleDeleteCluster(context.Background(), c, l) return err } - return r.handleExternalDelete(context.Background(), cadence) + return r.handleExternalDelete(context.Background(), c) } l.Error(err, "Cannot get Cadence cluster from the Instaclustr API", - "clusterID", cadence.Status.ID, + "clusterID", c.Status.ID, ) return err } - iCadence, err := cadence.FromInstAPI(iData) + iCadence, err := c.FromInstAPI(iData) if err != nil { l.Error(err, "Cannot convert Cadence cluster from the Instaclustr API", - "clusterID", cadence.Status.ID, + "clusterID", c.Status.ID, ) return err } - if !areStatusesEqual(&iCadence.Status.ClusterStatus, &cadence.Status.ClusterStatus) || - !areSecondaryCadenceTargetsEqual(cadence.Status.TargetSecondaryCadence, iCadence.Status.TargetSecondaryCadence) { + if !areStatusesEqual(&iCadence.Status.ClusterStatus, &c.Status.ClusterStatus) || + !areSecondaryCadenceTargetsEqual(c.Status.TargetSecondaryCadence, iCadence.Status.TargetSecondaryCadence) { l.Info("Updating Cadence cluster status", "new status", iCadence.Status.ClusterStatus, - "old status", cadence.Status.ClusterStatus, + "old status", c.Status.ClusterStatus, ) - 
areDCsEqual := areDataCentresEqual(iCadence.Status.ClusterStatus.DataCentres, cadence.Status.ClusterStatus.DataCentres) + areDCsEqual := areDataCentresEqual(iCadence.Status.ClusterStatus.DataCentres, c.Status.ClusterStatus.DataCentres) - patch := cadence.NewPatch() - cadence.Status.ClusterStatus = iCadence.Status.ClusterStatus - cadence.Status.TargetSecondaryCadence = iCadence.Status.TargetSecondaryCadence - err = r.Status().Patch(context.Background(), cadence, patch) + patch := c.NewPatch() + c.Status.ClusterStatus = iCadence.Status.ClusterStatus + c.Status.TargetSecondaryCadence = iCadence.Status.TargetSecondaryCadence + err = r.Status().Patch(context.Background(), c, patch) if err != nil { l.Error(err, "Cannot patch Cadence cluster", - "cluster name", cadence.Spec.Name, - "status", cadence.Status.State, + "cluster name", c.Spec.Name, + "status", c.Status.State, ) return err } @@ -845,9 +976,9 @@ func (r *CadenceReconciler) newWatchStatusJob(cadence *v1beta1.Cadence) schedule } err = exposeservice.Create(r.Client, - cadence.Name, - cadence.Namespace, - cadence.Spec.PrivateNetworkCluster, + c.Name, + c.Namespace, + c.Spec.PrivateNetworkCluster, nodes, models.CadenceConnectionPort) if err != nil { @@ -857,50 +988,50 @@ func (r *CadenceReconciler) newWatchStatusJob(cadence *v1beta1.Cadence) schedule } if iCadence.Status.CurrentClusterOperationStatus == models.NoOperation && - cadence.Annotations[models.ResourceStateAnnotation] != models.UpdatingEvent && - cadence.Annotations[models.UpdateQueuedAnnotation] != models.True && - !cadence.Spec.AreDCsEqual(iCadence.Spec.DataCentres) { + c.Annotations[models.ResourceStateAnnotation] != models.UpdatingEvent && + c.Annotations[models.UpdateQueuedAnnotation] != models.True && + !c.Spec.AreDCsEqual(iCadence.Spec.DataCentres) { l.Info(msgExternalChanges, "instaclustr data", iCadence.Spec.DataCentres, - "k8s resource spec", cadence.Spec.DataCentres) + "k8s resource spec", c.Spec.DataCentres) - patch := cadence.NewPatch() - cadence.Annotations[models.ExternalChangesAnnotation] = models.True + patch := c.NewPatch() + c.Annotations[models.ExternalChangesAnnotation] = models.True - err = r.Patch(context.Background(), cadence, patch) + err = r.Patch(context.Background(), c, patch) if err != nil { l.Error(err, "Cannot patch cluster cluster", - "cluster name", cadence.Spec.Name, "cluster state", cadence.Status.State) + "cluster name", c.Spec.Name, "cluster state", c.Status.State) return err } - msgDiffSpecs, err := createSpecDifferenceMessage(cadence.Spec.DataCentres, iCadence.Spec.DataCentres) + msgDiffSpecs, err := createSpecDifferenceMessage(c.Spec.DataCentres, iCadence.Spec.DataCentres) if err != nil { l.Error(err, "Cannot create specification difference message", - "instaclustr data", iCadence.Spec, "k8s resource spec", cadence.Spec) + "instaclustr data", iCadence.Spec, "k8s resource spec", c.Spec) return err } - r.EventRecorder.Eventf(cadence, models.Warning, models.ExternalChanges, msgDiffSpecs) + r.EventRecorder.Eventf(c, models.Warning, models.ExternalChanges, msgDiffSpecs) } //TODO: change all context.Background() and context.TODO() to ctx from Reconcile - err = r.reconcileMaintenanceEvents(context.Background(), cadence) + err = r.reconcileMaintenanceEvents(context.Background(), c) if err != nil { l.Error(err, "Cannot reconcile cluster maintenance events", - "cluster name", cadence.Spec.Name, - "cluster ID", cadence.Status.ID, + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, ) return err } - if cadence.Status.State == models.RunningStatus 
&& cadence.Status.CurrentClusterOperationStatus == models.OperationInProgress { - patch := cadence.NewPatch() - for _, dc := range cadence.Status.DataCentres { + if c.Status.State == models.RunningStatus && c.Status.CurrentClusterOperationStatus == models.OperationInProgress { + patch := c.NewPatch() + for _, dc := range c.Status.DataCentres { resizeOperations, err := r.API.GetResizeOperationsByClusterDataCentreID(dc.ID) if err != nil { l.Error(err, "Cannot get data centre resize operations", - "cluster name", cadence.Spec.Name, - "cluster ID", cadence.Status.ID, + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, "data centre ID", dc.ID, ) @@ -908,11 +1039,11 @@ func (r *CadenceReconciler) newWatchStatusJob(cadence *v1beta1.Cadence) schedule } dc.ResizeOperations = resizeOperations - err = r.Status().Patch(context.Background(), cadence, patch) + err = r.Status().Patch(context.Background(), c, patch) if err != nil { l.Error(err, "Cannot patch data centre resize operations", - "cluster name", cadence.Spec.Name, - "cluster ID", cadence.Status.ID, + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, "data centre ID", dc.ID, ) @@ -925,36 +1056,36 @@ func (r *CadenceReconciler) newWatchStatusJob(cadence *v1beta1.Cadence) schedule } } -func (r *CadenceReconciler) newKafkaSpec(cadence *v1beta1.Cadence, latestKafkaVersion string) (*v1beta1.Kafka, error) { +func (r *CadenceReconciler) newKafkaSpec(c *v1beta1.Cadence, latestKafkaVersion string) (*v1beta1.Kafka, error) { typeMeta := v1.TypeMeta{ Kind: models.KafkaKind, APIVersion: models.ClustersV1beta1APIVersion, } metadata := v1.ObjectMeta{ - Name: models.KafkaChildPrefix + cadence.Name, - Labels: map[string]string{models.ControlledByLabel: cadence.Name}, + Name: models.KafkaChildPrefix + c.Name, + Labels: map[string]string{models.ControlledByLabel: c.Name}, Annotations: map[string]string{models.ResourceStateAnnotation: models.CreatingEvent}, - Namespace: cadence.ObjectMeta.Namespace, + Namespace: c.ObjectMeta.Namespace, Finalizers: []string{}, } - if len(cadence.Spec.DataCentres) == 0 { + if len(c.Spec.DataCentres) == 0 { return nil, models.ErrZeroDataCentres } var kafkaTFD []*v1beta1.TwoFactorDelete - for _, cadenceTFD := range cadence.Spec.TwoFactorDelete { + for _, cadenceTFD := range c.Spec.TwoFactorDelete { twoFactorDelete := &v1beta1.TwoFactorDelete{ Email: cadenceTFD.Email, Phone: cadenceTFD.Phone, } kafkaTFD = append(kafkaTFD, twoFactorDelete) } - bundledKafkaSpec := cadence.Spec.PackagedProvisioning[0].BundledKafkaSpec + bundledKafkaSpec := c.Spec.PackagedProvisioning[0].BundledKafkaSpec kafkaNetwork := bundledKafkaSpec.Network - for _, cadenceDC := range cadence.Spec.DataCentres { + for _, cadenceDC := range c.Spec.DataCentres { isKafkaNetworkOverlaps, err := cadenceDC.IsNetworkOverlaps(kafkaNetwork) if err != nil { return nil, err @@ -967,9 +1098,9 @@ func (r *CadenceReconciler) newKafkaSpec(cadence *v1beta1.Cadence, latestKafkaVe kafkaNodeSize := bundledKafkaSpec.NodeSize kafkaNodesNumber := bundledKafkaSpec.NodesNumber dcName := models.KafkaChildDCName - dcRegion := cadence.Spec.DataCentres[0].Region - cloudProvider := cadence.Spec.DataCentres[0].CloudProvider - providerAccountName := cadence.Spec.DataCentres[0].ProviderAccountName + dcRegion := c.Spec.DataCentres[0].Region + cloudProvider := c.Spec.DataCentres[0].CloudProvider + providerAccountName := c.Spec.DataCentres[0].ProviderAccountName kafkaDataCentres := []*v1beta1.KafkaDataCentre{ { DataCentre: v1beta1.DataCentre{ @@ -984,13 +1115,13 @@ func (r 
*CadenceReconciler) newKafkaSpec(cadence *v1beta1.Cadence, latestKafkaVe }, } - slaTier := cadence.Spec.SLATier - privateClusterNetwork := cadence.Spec.PrivateNetworkCluster - pciCompliance := cadence.Spec.PCICompliance - clientEncryption := cadence.Spec.DataCentres[0].ClientEncryption + slaTier := c.Spec.SLATier + privateClusterNetwork := c.Spec.PrivateNetworkCluster + pciCompliance := c.Spec.PCICompliance + clientEncryption := c.Spec.DataCentres[0].ClientEncryption spec := v1beta1.KafkaSpec{ Cluster: v1beta1.Cluster{ - Name: models.KafkaChildPrefix + cadence.Name, + Name: models.KafkaChildPrefix + c.Name, Version: latestKafkaVersion, SLATier: slaTier, PrivateNetworkCluster: privateClusterNetwork, @@ -1013,25 +1144,25 @@ func (r *CadenceReconciler) newKafkaSpec(cadence *v1beta1.Cadence, latestKafkaVe }, nil } -func (r *CadenceReconciler) newOpenSearchSpec(cadence *v1beta1.Cadence, oldestOpenSearchVersion string) (*v1beta1.OpenSearch, error) { +func (r *CadenceReconciler) newOpenSearchSpec(c *v1beta1.Cadence, oldestOpenSearchVersion string) (*v1beta1.OpenSearch, error) { typeMeta := v1.TypeMeta{ Kind: models.OpenSearchKind, APIVersion: models.ClustersV1beta1APIVersion, } metadata := v1.ObjectMeta{ - Name: models.OpenSearchChildPrefix + cadence.Name, - Labels: map[string]string{models.ControlledByLabel: cadence.Name}, + Name: models.OpenSearchChildPrefix + c.Name, + Labels: map[string]string{models.ControlledByLabel: c.Name}, Annotations: map[string]string{models.ResourceStateAnnotation: models.CreatingEvent}, - Namespace: cadence.ObjectMeta.Namespace, + Namespace: c.ObjectMeta.Namespace, Finalizers: []string{}, } - if len(cadence.Spec.DataCentres) < 1 { + if len(c.Spec.DataCentres) < 1 { return nil, models.ErrZeroDataCentres } - bundledOpenSearchSpec := cadence.Spec.PackagedProvisioning[0].BundledOpenSearchSpec + bundledOpenSearchSpec := c.Spec.PackagedProvisioning[0].BundledOpenSearchSpec managerNodes := []*v1beta1.ClusterManagerNodes{{ NodeSize: bundledOpenSearchSpec.NodeSize, @@ -1039,22 +1170,22 @@ func (r *CadenceReconciler) newOpenSearchSpec(cadence *v1beta1.Cadence, oldestOp }} osReplicationFactor := bundledOpenSearchSpec.ReplicationFactor - slaTier := cadence.Spec.SLATier - privateClusterNetwork := cadence.Spec.PrivateNetworkCluster - pciCompliance := cadence.Spec.PCICompliance + slaTier := c.Spec.SLATier + privateClusterNetwork := c.Spec.PrivateNetworkCluster + pciCompliance := c.Spec.PCICompliance var twoFactorDelete []*v1beta1.TwoFactorDelete - if len(cadence.Spec.TwoFactorDelete) > 0 { + if len(c.Spec.TwoFactorDelete) > 0 { twoFactorDelete = []*v1beta1.TwoFactorDelete{ { - Email: cadence.Spec.TwoFactorDelete[0].Email, - Phone: cadence.Spec.TwoFactorDelete[0].Phone, + Email: c.Spec.TwoFactorDelete[0].Email, + Phone: c.Spec.TwoFactorDelete[0].Phone, }, } } osNetwork := bundledOpenSearchSpec.Network - isOsNetworkOverlaps, err := cadence.Spec.DataCentres[0].IsNetworkOverlaps(osNetwork) + isOsNetworkOverlaps, err := c.Spec.DataCentres[0].IsNetworkOverlaps(osNetwork) if err != nil { return nil, err } @@ -1063,9 +1194,9 @@ func (r *CadenceReconciler) newOpenSearchSpec(cadence *v1beta1.Cadence, oldestOp } dcName := models.OpenSearchChildDCName - dcRegion := cadence.Spec.DataCentres[0].Region - cloudProvider := cadence.Spec.DataCentres[0].CloudProvider - providerAccountName := cadence.Spec.DataCentres[0].ProviderAccountName + dcRegion := c.Spec.DataCentres[0].Region + cloudProvider := c.Spec.DataCentres[0].CloudProvider + providerAccountName := c.Spec.DataCentres[0].ProviderAccountName 
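// The bundled OpenSearch data centre below inherits its placement (region,
// cloud provider and provider account name) from the first Cadence data
// centre, mirroring how the child Cassandra and Kafka specs are built above.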
osDataCentres := []*v1beta1.OpenSearchDataCentre{ { @@ -1079,7 +1210,7 @@ func (r *CadenceReconciler) newOpenSearchSpec(cadence *v1beta1.Cadence, oldestOp } spec := v1beta1.OpenSearchSpec{ Cluster: v1beta1.Cluster{ - Name: models.OpenSearchChildPrefix + cadence.Name, + Name: models.OpenSearchChildPrefix + c.Name, Version: oldestOpenSearchVersion, SLATier: slaTier, PrivateNetworkCluster: privateClusterNetwork, @@ -1100,10 +1231,10 @@ func (r *CadenceReconciler) newOpenSearchSpec(cadence *v1beta1.Cadence, oldestOp func (r *CadenceReconciler) deletePackagedResources( ctx context.Context, - cadence *v1beta1.Cadence, + c *v1beta1.Cadence, packagedProvisioning *v1beta1.PackagedProvisioning, ) error { - labelsToQuery := fmt.Sprintf("%s=%s", models.ControlledByLabel, cadence.Name) + labelsToQuery := fmt.Sprintf("%s=%s", models.ControlledByLabel, c.Name) selector, err := labels.Parse(labelsToQuery) if err != nil { return err diff --git a/controllers/clusters/cassandra_controller.go b/controllers/clusters/cassandra_controller.go index c69128726..610cf7ea4 100644 --- a/controllers/clusters/cassandra_controller.go +++ b/controllers/clusters/cassandra_controller.go @@ -45,10 +45,6 @@ import ( "github.com/instaclustr/operator/pkg/scheduler" ) -const ( - StatusRUNNING = "RUNNING" -) - // CassandraReconciler reconciles a Cassandra object type CassandraReconciler struct { client.Client @@ -62,8 +58,15 @@ type CassandraReconciler struct { //+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=cassandras/status,verbs=get;update;patch //+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=cassandras/finalizers,verbs=update //+kubebuilder:rbac:groups="",resources=events,verbs=create;patch -//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups="",resources=endpoints,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=cdi.kubevirt.io,resources=datavolumes,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachines,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachineinstances,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete;deletecollection // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. 
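// Note: the kubebuilder RBAC markers above now cover kubevirt.io
// (virtualmachines, virtualmachineinstances) and cdi.kubevirt.io
// (datavolumes) because on-premises clusters are provisioned as KubeVirt
// virtual machines backed by CDI data volumes; see newOnPremisesBootstrap
// and deleteOnPremResources elsewhere in this patch.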
@@ -73,8 +76,8 @@ type CassandraReconciler struct { func (r *CassandraReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { l := log.FromContext(ctx) - cassandra := &v1beta1.Cassandra{} - err := r.Client.Get(ctx, req.NamespacedName, cassandra) + c := &v1beta1.Cassandra{} + err := r.Client.Get(ctx, req.NamespacedName, c) if err != nil { if k8serrors.IsNotFound(err) { l.Info("Cassandra resource is not found", @@ -87,23 +90,23 @@ func (r *CassandraReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return reconcile.Result{}, err } - switch cassandra.Annotations[models.ResourceStateAnnotation] { + switch c.Annotations[models.ResourceStateAnnotation] { case models.CreatingEvent: - return r.handleCreateCluster(ctx, l, cassandra) + return r.handleCreateCluster(ctx, l, c) case models.UpdatingEvent: - return r.handleUpdateCluster(ctx, l, cassandra) + return r.handleUpdateCluster(ctx, l, c) case models.DeletingEvent: - return r.handleDeleteCluster(ctx, l, cassandra) + return r.handleDeleteCluster(ctx, l, c) case models.GenericEvent: l.Info("Event isn't handled", - "cluster name", cassandra.Spec.Name, + "cluster name", c.Spec.Name, "request", req, - "event", cassandra.Annotations[models.ResourceStateAnnotation]) + "event", c.Annotations[models.ResourceStateAnnotation]) return models.ExitReconcile, nil default: l.Info("Event isn't handled", "request", req, - "event", cassandra.Annotations[models.ResourceStateAnnotation]) + "event", c.Annotations[models.ResourceStateAnnotation]) return models.ExitReconcile, nil } @@ -112,27 +115,27 @@ func (r *CassandraReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( func (r *CassandraReconciler) handleCreateCluster( ctx context.Context, l logr.Logger, - cassandra *v1beta1.Cassandra, + c *v1beta1.Cassandra, ) (reconcile.Result, error) { l = l.WithName("Cassandra creation event") var err error - patch := cassandra.NewPatch() - if cassandra.Status.ID == "" { + patch := c.NewPatch() + if c.Status.ID == "" { var id string - if cassandra.Spec.HasRestore() { + if c.Spec.HasRestore() { l.Info( "Creating cluster from backup", - "original cluster ID", cassandra.Spec.RestoreFrom.ClusterID, + "original cluster ID", c.Spec.RestoreFrom.ClusterID, ) - id, err = r.API.RestoreCluster(cassandra.RestoreInfoToInstAPI(cassandra.Spec.RestoreFrom), models.CassandraAppKind) + id, err = r.API.RestoreCluster(c.RestoreInfoToInstAPI(c.Spec.RestoreFrom), models.CassandraAppKind) if err != nil { l.Error(err, "Cannot restore cluster from backup", - "original cluster ID", cassandra.Spec.RestoreFrom.ClusterID, + "original cluster ID", c.Spec.RestoreFrom.ClusterID, ) r.EventRecorder.Eventf( - cassandra, models.Warning, models.CreationFailed, + c, models.Warning, models.CreationFailed, "Cluster restore from backup on the Instaclustr is failed. Reason: %v", err, ) @@ -141,26 +144,26 @@ func (r *CassandraReconciler) handleCreateCluster( } r.EventRecorder.Eventf( - cassandra, models.Normal, models.Created, + c, models.Normal, models.Created, "Cluster restore request is sent. 
Original cluster ID: %s, new cluster ID: %s", - cassandra.Spec.RestoreFrom.ClusterID, + c.Spec.RestoreFrom.ClusterID, id, ) } else { l.Info( "Creating cluster", - "cluster name", cassandra.Spec.Name, - "data centres", cassandra.Spec.DataCentres, + "cluster name", c.Spec.Name, + "data centres", c.Spec.DataCentres, ) - id, err = r.API.CreateCluster(instaclustr.CassandraEndpoint, cassandra.Spec.ToInstAPI()) + id, err = r.API.CreateCluster(instaclustr.CassandraEndpoint, c.Spec.ToInstAPI()) if err != nil { l.Error( err, "Cannot create cluster", - "cluster spec", cassandra.Spec, + "cluster spec", c.Spec, ) r.EventRecorder.Eventf( - cassandra, models.Warning, models.CreationFailed, + c, models.Warning, models.CreationFailed, "Cluster creation on the Instaclustr is failed. Reason: %v", err, ) @@ -168,45 +171,45 @@ func (r *CassandraReconciler) handleCreateCluster( } r.EventRecorder.Eventf( - cassandra, models.Normal, models.Created, + c, models.Normal, models.Created, "Cluster creation request is sent. Cluster ID: %s", id, ) } - cassandra.Status.ID = id - err = r.Status().Patch(ctx, cassandra, patch) + c.Status.ID = id + err = r.Status().Patch(ctx, c, patch) if err != nil { l.Error(err, "Cannot patch cluster status", - "cluster name", cassandra.Spec.Name, - "cluster ID", cassandra.Status.ID, - "kind", cassandra.Kind, - "api Version", cassandra.APIVersion, - "namespace", cassandra.Namespace, - "cluster metadata", cassandra.ObjectMeta, + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, + "kind", c.Kind, + "api Version", c.APIVersion, + "namespace", c.Namespace, + "cluster metadata", c.ObjectMeta, ) r.EventRecorder.Eventf( - cassandra, models.Warning, models.PatchFailed, + c, models.Warning, models.PatchFailed, "Cluster resource status patch is failed. Reason: %v", err, ) return reconcile.Result{}, err } - controllerutil.AddFinalizer(cassandra, models.DeletionFinalizer) - cassandra.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent - err = r.Patch(ctx, cassandra, patch) + controllerutil.AddFinalizer(c, models.DeletionFinalizer) + c.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent + err = r.Patch(ctx, c, patch) if err != nil { l.Error(err, "Cannot patch cluster", - "cluster name", cassandra.Spec.Name, - "cluster ID", cassandra.Status.ID, - "kind", cassandra.Kind, - "api Version", cassandra.APIVersion, - "namespace", cassandra.Namespace, - "cluster metadata", cassandra.ObjectMeta, + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, + "kind", c.Kind, + "api Version", c.APIVersion, + "namespace", c.Namespace, + "cluster metadata", c.ObjectMeta, ) r.EventRecorder.Eventf( - cassandra, models.Warning, models.PatchFailed, + c, models.Warning, models.PatchFailed, "Cluster resource patch is failed. 
Reason: %v", err, ) @@ -215,22 +218,22 @@ func (r *CassandraReconciler) handleCreateCluster( l.Info( "Cluster has been created", - "cluster name", cassandra.Spec.Name, - "cluster ID", cassandra.Status.ID, - "kind", cassandra.Kind, - "api Version", cassandra.APIVersion, - "namespace", cassandra.Namespace, + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, + "kind", c.Kind, + "api Version", c.APIVersion, + "namespace", c.Namespace, ) } - if cassandra.Status.State != models.DeletedStatus { - err = r.startClusterStatusJob(cassandra) + if c.Status.State != models.DeletedStatus { + err = r.startClusterStatusJob(c) if err != nil { l.Error(err, "Cannot start cluster status job", - "cassandra cluster ID", cassandra.Status.ID) + "c cluster ID", c.Status.ID) r.EventRecorder.Eventf( - cassandra, models.Warning, models.CreationFailed, + c, models.Warning, models.CreationFailed, "Cluster status check job is failed. Reason: %v", err, ) @@ -238,41 +241,118 @@ func (r *CassandraReconciler) handleCreateCluster( } r.EventRecorder.Eventf( - cassandra, models.Normal, models.Created, + c, models.Normal, models.Created, "Cluster status check job is started", ) + } + if c.Spec.OnPremisesSpec != nil { + iData, err := r.API.GetCassandra(c.Status.ID) + if err != nil { + l.Error(err, "Cannot get cluster from the Instaclustr API", + "cluster name", c.Spec.Name, + "data centres", c.Spec.DataCentres, + "cluster ID", c.Status.ID, + ) + r.EventRecorder.Eventf( + c, models.Warning, models.FetchFailed, + "Cluster fetch from the Instaclustr API is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + iCassandra, err := c.FromInstAPI(iData) + if err != nil { + l.Error( + err, "Cannot convert cluster from the Instaclustr API", + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, + ) + r.EventRecorder.Eventf( + c, models.Warning, models.ConversionFailed, + "Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + bootstrap := newOnPremisesBootstrap( + r.Client, + c, + r.EventRecorder, + iCassandra.Status.ClusterStatus, + c.Spec.OnPremisesSpec, + newExposePorts(c.GetExposePorts()), + c.GetHeadlessPorts(), + c.Spec.PrivateNetworkCluster, + ) + + err = handleCreateOnPremisesClusterResources(ctx, bootstrap) + if err != nil { + l.Error( + err, "Cannot create resources for on-premises cluster", + "cluster spec", c.Spec.OnPremisesSpec, + ) + r.EventRecorder.Eventf( + c, models.Warning, models.CreationFailed, + "Resources creation for on-premises cluster is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } - err = r.startClusterBackupsJob(cassandra) + err = r.startClusterOnPremisesIPsJob(c, bootstrap) if err != nil { - l.Error(err, "Cannot start cluster backups check job", - "cluster ID", cassandra.Status.ID, + l.Error(err, "Cannot start on-premises cluster IPs check job", + "cluster ID", c.Status.ID, ) r.EventRecorder.Eventf( - cassandra, models.Warning, models.CreationFailed, - "Cluster backups check job is failed. Reason: %v", + c, models.Warning, models.CreationFailed, + "On-premises cluster IPs check job is failed. 
Reason: %v", err, ) return reconcile.Result{}, err } + l.Info( + "On-premises resources have been created", + "cluster name", c.Spec.Name, + "on-premises Spec", c.Spec.OnPremisesSpec, + "cluster ID", c.Status.ID, + ) + return models.ExitReconcile, nil + } + + err = r.startClusterBackupsJob(c) + if err != nil { + l.Error(err, "Cannot start cluster backups check job", + "cluster ID", c.Status.ID, + ) + r.EventRecorder.Eventf( - cassandra, models.Normal, models.Created, - "Cluster backups check job is started", + c, models.Warning, models.CreationFailed, + "Cluster backups check job is failed. Reason: %v", + err, ) + return reconcile.Result{}, err + } - if cassandra.Spec.UserRefs != nil && cassandra.Status.AvailableUsers == nil { - err = r.startUsersCreationJob(cassandra) - if err != nil { - l.Error(err, "Failed to start user creation job") - r.EventRecorder.Eventf(cassandra, models.Warning, models.CreationFailed, - "User creation job is failed. Reason: %v", err) - return reconcile.Result{}, err - } + r.EventRecorder.Eventf( + c, models.Normal, models.Created, + "Cluster backups check job is started", + ) - r.EventRecorder.Event(cassandra, models.Normal, models.Created, - "Cluster user creation job is started") + if c.Spec.UserRefs != nil && c.Status.AvailableUsers == nil { + err = r.startUsersCreationJob(c) + if err != nil { + l.Error(err, "Failed to start user creation job") + r.EventRecorder.Eventf(c, models.Warning, models.CreationFailed, + "User creation job is failed. Reason: %v", err) + return reconcile.Result{}, err } + + r.EventRecorder.Event(c, models.Normal, models.Created, + "Cluster user creation job is started") } return models.ExitReconcile, nil @@ -281,99 +361,99 @@ func (r *CassandraReconciler) handleCreateCluster( func (r *CassandraReconciler) handleUpdateCluster( ctx context.Context, l logr.Logger, - cassandra *v1beta1.Cassandra, + c *v1beta1.Cassandra, ) (reconcile.Result, error) { l = l.WithName("Cassandra update event") - iData, err := r.API.GetCassandra(cassandra.Status.ID) + iData, err := r.API.GetCassandra(c.Status.ID) if err != nil { l.Error(err, "Cannot get cluster from the Instaclustr API", - "cluster name", cassandra.Spec.Name, - "cluster ID", cassandra.Status.ID, + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, ) r.EventRecorder.Eventf( - cassandra, models.Warning, models.FetchFailed, + c, models.Warning, models.FetchFailed, "Cluster fetch from the Instaclustr API is failed. Reason: %v", err, ) return reconcile.Result{}, err } - iCassandra, err := cassandra.FromInstAPI(iData) + iCassandra, err := c.FromInstAPI(iData) if err != nil { l.Error( err, "Cannot convert cluster from the Instaclustr API", - "cluster name", cassandra.Spec.Name, - "cluster ID", cassandra.Status.ID, + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, ) r.EventRecorder.Eventf( - cassandra, models.Warning, models.ConvertionFailed, + c, models.Warning, models.ConversionFailed, "Cluster convertion from the Instaclustr API to k8s resource is failed. 
Reason: %v", err, ) return reconcile.Result{}, err } - if cassandra.Annotations[models.ExternalChangesAnnotation] == models.True { - return r.handleExternalChanges(cassandra, iCassandra, l) + if c.Annotations[models.ExternalChangesAnnotation] == models.True { + return r.handleExternalChanges(c, iCassandra, l) } - patch := cassandra.NewPatch() + patch := c.NewPatch() - if cassandra.Spec.ClusterSettingsNeedUpdate(iCassandra.Spec.Cluster) { + if c.Spec.ClusterSettingsNeedUpdate(iCassandra.Spec.Cluster) { l.Info("Updating cluster settings", "instaclustr description", iCassandra.Spec.Description, "instaclustr two factor delete", iCassandra.Spec.TwoFactorDelete) - err = r.API.UpdateClusterSettings(cassandra.Status.ID, cassandra.Spec.ClusterSettingsUpdateToInstAPI()) + err = r.API.UpdateClusterSettings(c.Status.ID, c.Spec.ClusterSettingsUpdateToInstAPI()) if err != nil { l.Error(err, "Cannot update cluster settings", - "cluster ID", cassandra.Status.ID, "cluster spec", cassandra.Spec) - r.EventRecorder.Eventf(cassandra, models.Warning, models.UpdateFailed, + "cluster ID", c.Status.ID, "cluster spec", c.Spec) + r.EventRecorder.Eventf(c, models.Warning, models.UpdateFailed, "Cannot update cluster settings. Reason: %v", err) return reconcile.Result{}, err } } - if !cassandra.Spec.AreDCsEqual(iCassandra.Spec.DataCentres) { + if !c.Spec.AreDCsEqual(iCassandra.Spec.DataCentres) { l.Info("Update request to Instaclustr API has been sent", - "spec data centres", cassandra.Spec.DataCentres, - "resize settings", cassandra.Spec.ResizeSettings, + "spec data centres", c.Spec.DataCentres, + "resize settings", c.Spec.ResizeSettings, ) - err = r.API.UpdateCassandra(cassandra.Status.ID, cassandra.Spec.DCsUpdateToInstAPI()) + err = r.API.UpdateCassandra(c.Status.ID, c.Spec.DCsUpdateToInstAPI()) if err != nil { l.Error(err, "Cannot update cluster", - "cluster ID", cassandra.Status.ID, - "cluster name", cassandra.Spec.Name, - "cluster spec", cassandra.Spec, - "cluster state", cassandra.Status.State, + "cluster ID", c.Status.ID, + "cluster name", c.Spec.Name, + "cluster spec", c.Spec, + "cluster state", c.Status.State, ) r.EventRecorder.Eventf( - cassandra, models.Warning, models.UpdateFailed, + c, models.Warning, models.UpdateFailed, "Cluster update on the Instaclustr API is failed. Reason: %v", err, ) if errors.Is(err, instaclustr.ClusterIsNotReadyToResize) { - patch := cassandra.NewPatch() - cassandra.Annotations[models.UpdateQueuedAnnotation] = models.True - err = r.Patch(ctx, cassandra, patch) + patch := c.NewPatch() + c.Annotations[models.UpdateQueuedAnnotation] = models.True + err = r.Patch(ctx, c, patch) if err != nil { l.Error(err, "Cannot patch cluster resource", - "cluster name", cassandra.Spec.Name, - "cluster ID", cassandra.Status.ID, - "kind", cassandra.Kind, - "api Version", cassandra.APIVersion, - "namespace", cassandra.Namespace, - "cluster metadata", cassandra.ObjectMeta, + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, + "kind", c.Kind, + "api Version", c.APIVersion, + "namespace", c.Namespace, + "cluster metadata", c.ObjectMeta, ) r.EventRecorder.Eventf( - cassandra, models.Warning, models.PatchFailed, + c, models.Warning, models.PatchFailed, "Cluster resource patch is failed. 
Reason: %v", err, ) @@ -385,29 +465,29 @@ func (r *CassandraReconciler) handleUpdateCluster( } } - err = handleUsersChanges(ctx, r.Client, r, cassandra) + err = handleUsersChanges(ctx, r.Client, r, c) if err != nil { l.Error(err, "Failed to handle users changes") - r.EventRecorder.Eventf(cassandra, models.Warning, models.PatchFailed, + r.EventRecorder.Eventf(c, models.Warning, models.PatchFailed, "Handling users changes is failed. Reason: %w", err, ) return reconcile.Result{}, err } - cassandra.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent - cassandra.Annotations[models.UpdateQueuedAnnotation] = "" - err = r.Patch(ctx, cassandra, patch) + c.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent + c.Annotations[models.UpdateQueuedAnnotation] = "" + err = r.Patch(ctx, c, patch) if err != nil { l.Error(err, "Cannot patch cluster resource", - "cluster name", cassandra.Spec.Name, - "cluster ID", cassandra.Status.ID, - "kind", cassandra.Kind, - "api Version", cassandra.APIVersion, - "namespace", cassandra.Namespace, - "cluster metadata", cassandra.ObjectMeta, + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, + "kind", c.Kind, + "api Version", c.APIVersion, + "namespace", c.Namespace, + "cluster metadata", c.ObjectMeta, ) r.EventRecorder.Eventf( - cassandra, models.Warning, models.PatchFailed, + c, models.Warning, models.PatchFailed, "Cluster resource patch is failed. Reason: %v", err, ) @@ -416,49 +496,49 @@ func (r *CassandraReconciler) handleUpdateCluster( l.Info( "Cluster has been updated", - "cluster name", cassandra.Spec.Name, - "cluster ID", cassandra.Status.ID, - "data centres", cassandra.Spec.DataCentres, + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, + "data centres", c.Spec.DataCentres, ) return models.ExitReconcile, nil } -func (r *CassandraReconciler) handleExternalChanges(cassandra, iCassandra *v1beta1.Cassandra, l logr.Logger) (reconcile.Result, error) { - if !cassandra.Spec.IsEqual(iCassandra.Spec) { +func (r *CassandraReconciler) handleExternalChanges(c, iCassandra *v1beta1.Cassandra, l logr.Logger) (reconcile.Result, error) { + if !c.Spec.IsEqual(iCassandra.Spec) { l.Info(msgSpecStillNoMatch, - "specification of k8s resource", cassandra.Spec, + "specification of k8s resource", c.Spec, "data from Instaclustr ", iCassandra.Spec) - msgDiffSpecs, err := createSpecDifferenceMessage(cassandra.Spec, iCassandra.Spec) + msgDiffSpecs, err := createSpecDifferenceMessage(c.Spec, iCassandra.Spec) if err != nil { l.Error(err, "Cannot create specification difference message", - "instaclustr data", iCassandra.Spec, "k8s resource spec", cassandra.Spec) + "instaclustr data", iCassandra.Spec, "k8s resource spec", c.Spec) return models.ExitReconcile, nil } - r.EventRecorder.Eventf(cassandra, models.Warning, models.ExternalChanges, msgDiffSpecs) + r.EventRecorder.Eventf(c, models.Warning, models.ExternalChanges, msgDiffSpecs) return models.ExitReconcile, nil } - patch := cassandra.NewPatch() + patch := c.NewPatch() - cassandra.Annotations[models.ExternalChangesAnnotation] = "" + c.Annotations[models.ExternalChangesAnnotation] = "" - err := r.Patch(context.Background(), cassandra, patch) + err := r.Patch(context.Background(), c, patch) if err != nil { l.Error(err, "Cannot patch cluster resource", - "cluster name", cassandra.Spec.Name, "cluster ID", cassandra.Status.ID) + "cluster name", c.Spec.Name, "cluster ID", c.Status.ID) - r.EventRecorder.Eventf(cassandra, models.Warning, models.PatchFailed, + r.EventRecorder.Eventf(c, models.Warning, 
models.PatchFailed, "Cluster resource patch is failed. Reason: %v", err) return reconcile.Result{}, err } - l.Info("External changes have been reconciled", "resource ID", cassandra.Status.ID) - r.EventRecorder.Event(cassandra, models.Normal, models.ExternalChanges, "External changes have been reconciled") + l.Info("External changes have been reconciled", "resource ID", c.Status.ID) + r.EventRecorder.Event(c, models.Normal, models.ExternalChanges, "External changes have been reconciled") return models.ExitReconcile, nil } @@ -466,91 +546,108 @@ func (r *CassandraReconciler) handleExternalChanges(cassandra, iCassandra *v1bet func (r *CassandraReconciler) handleDeleteCluster( ctx context.Context, l logr.Logger, - cassandra *v1beta1.Cassandra, + c *v1beta1.Cassandra, ) (reconcile.Result, error) { l = l.WithName("Cassandra deletion event") - _, err := r.API.GetCassandra(cassandra.Status.ID) + _, err := r.API.GetCassandra(c.Status.ID) if err != nil && !errors.Is(err, instaclustr.NotFound) { l.Error( err, "Cannot get cluster from the Instaclustr API", - "cluster name", cassandra.Spec.Name, - "cluster ID", cassandra.Status.ID, - "kind", cassandra.Kind, - "api Version", cassandra.APIVersion, - "namespace", cassandra.Namespace, + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, + "kind", c.Kind, + "api Version", c.APIVersion, + "namespace", c.Namespace, ) r.EventRecorder.Eventf( - cassandra, models.Warning, models.FetchFailed, + c, models.Warning, models.FetchFailed, "Cluster fetch from the Instaclustr API is failed. Reason: %v", err, ) return reconcile.Result{}, err } - patch := cassandra.NewPatch() + patch := c.NewPatch() if !errors.Is(err, instaclustr.NotFound) { l.Info("Sending cluster deletion to the Instaclustr API", - "cluster name", cassandra.Spec.Name, - "cluster ID", cassandra.Status.ID) + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID) - err = r.API.DeleteCluster(cassandra.Status.ID, instaclustr.CassandraEndpoint) + err = r.API.DeleteCluster(c.Status.ID, instaclustr.CassandraEndpoint) if err != nil { l.Error(err, "Cannot delete cluster", - "cluster name", cassandra.Spec.Name, - "state", cassandra.Status.State, - "kind", cassandra.Kind, - "api Version", cassandra.APIVersion, - "namespace", cassandra.Namespace, + "cluster name", c.Spec.Name, + "state", c.Status.State, + "kind", c.Kind, + "api Version", c.APIVersion, + "namespace", c.Namespace, ) r.EventRecorder.Eventf( - cassandra, models.Warning, models.DeletionFailed, + c, models.Warning, models.DeletionFailed, "Cluster deletion on the Instaclustr API is failed. 
Reason: %v", err, ) return reconcile.Result{}, err } - r.EventRecorder.Event(cassandra, models.Normal, models.DeletionStarted, + r.EventRecorder.Event(c, models.Normal, models.DeletionStarted, "Cluster deletion request is sent to the Instaclustr API.") - if cassandra.Spec.TwoFactorDelete != nil { - cassandra.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent - cassandra.Annotations[models.ClusterDeletionAnnotation] = models.Triggered - err = r.Patch(ctx, cassandra, patch) + if c.Spec.TwoFactorDelete != nil { + c.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent + c.Annotations[models.ClusterDeletionAnnotation] = models.Triggered + err = r.Patch(ctx, c, patch) if err != nil { l.Error(err, "Cannot patch cluster resource", - "cluster name", cassandra.Spec.Name, - "cluster state", cassandra.Status.State) - r.EventRecorder.Eventf(cassandra, models.Warning, models.PatchFailed, + "cluster name", c.Spec.Name, + "cluster state", c.Status.State) + r.EventRecorder.Eventf(c, models.Warning, models.PatchFailed, "Cluster resource patch is failed. Reason: %v", err) return reconcile.Result{}, err } - l.Info(msgDeleteClusterWithTwoFactorDelete, "cluster ID", cassandra.Status.ID) + l.Info(msgDeleteClusterWithTwoFactorDelete, "cluster ID", c.Status.ID) - r.EventRecorder.Event(cassandra, models.Normal, models.DeletionStarted, + r.EventRecorder.Event(c, models.Normal, models.DeletionStarted, "Two-Factor Delete is enabled, please confirm cluster deletion via email or phone.") return models.ExitReconcile, nil } } - r.Scheduler.RemoveJob(cassandra.GetJobID(scheduler.UserCreator)) - r.Scheduler.RemoveJob(cassandra.GetJobID(scheduler.BackupsChecker)) - r.Scheduler.RemoveJob(cassandra.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(c.GetJobID(scheduler.UserCreator)) + r.Scheduler.RemoveJob(c.GetJobID(scheduler.BackupsChecker)) + r.Scheduler.RemoveJob(c.GetJobID(scheduler.StatusChecker)) + + if c.Spec.OnPremisesSpec != nil { + err = deleteOnPremResources(ctx, r.Client, c.Status.ID, c.Namespace) + if err != nil { + l.Error(err, "Cannot delete cluster on-premises resources", + "cluster ID", c.Status.ID) + r.EventRecorder.Eventf(c, models.Warning, models.DeletionFailed, + "Cluster on-premises resources deletion is failed. Reason: %v", err) + return reconcile.Result{}, err + } - l.Info("Deleting cluster backup resources", "cluster ID", cassandra.Status.ID) + l.Info("Cluster on-premises resources are deleted", + "cluster ID", c.Status.ID) + r.EventRecorder.Eventf(c, models.Normal, models.Deleted, + "Cluster on-premises resources are deleted") + r.Scheduler.RemoveJob(c.GetJobID(scheduler.OnPremisesIPsChecker)) + } - err = r.deleteBackups(ctx, cassandra.Status.ID, cassandra.Namespace) + l.Info("Deleting cluster backup resources", "cluster ID", c.Status.ID) + + err = r.deleteBackups(ctx, c.Status.ID, c.Namespace) if err != nil { l.Error(err, "Cannot delete cluster backup resources", - "cluster ID", cassandra.Status.ID, + "cluster ID", c.Status.ID, ) r.EventRecorder.Eventf( - cassandra, models.Warning, models.DeletionFailed, + c, models.Warning, models.DeletionFailed, "Cluster backups deletion is failed. 
Reason: %v", err, ) @@ -558,72 +655,72 @@ func (r *CassandraReconciler) handleDeleteCluster( } l.Info("Cluster backup resources were deleted", - "cluster ID", cassandra.Status.ID, + "cluster ID", c.Status.ID, ) r.EventRecorder.Eventf( - cassandra, models.Normal, models.Deleted, + c, models.Normal, models.Deleted, "Cluster backup resources are deleted", ) - err = detachUsers(ctx, r.Client, r, cassandra) + err = detachUsers(ctx, r.Client, r, c) if err != nil { l.Error(err, "Failed to detach users from the cluster") - r.EventRecorder.Eventf(cassandra, models.Warning, models.DeletionFailed, + r.EventRecorder.Eventf(c, models.Warning, models.DeletionFailed, "Detaching users from the cluster is failed. Reason: %w", err, ) return reconcile.Result{}, err } - controllerutil.RemoveFinalizer(cassandra, models.DeletionFinalizer) - cassandra.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent - err = r.Patch(ctx, cassandra, patch) + controllerutil.RemoveFinalizer(c, models.DeletionFinalizer) + c.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent + err = r.Patch(ctx, c, patch) if err != nil { l.Error(err, "Cannot patch cluster resource", - "cluster name", cassandra.Spec.Name, - "cluster ID", cassandra.Status.ID, - "kind", cassandra.Kind, - "api Version", cassandra.APIVersion, - "namespace", cassandra.Namespace, - "cluster metadata", cassandra.ObjectMeta, + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, + "kind", c.Kind, + "api Version", c.APIVersion, + "namespace", c.Namespace, + "cluster metadata", c.ObjectMeta, ) r.EventRecorder.Eventf( - cassandra, models.Warning, models.PatchFailed, + c, models.Warning, models.PatchFailed, "Cluster resource patch is failed. Reason: %v", err, ) return reconcile.Result{}, err } - err = exposeservice.Delete(r.Client, cassandra.Name, cassandra.Namespace) + err = exposeservice.Delete(r.Client, c.Name, c.Namespace) if err != nil { l.Error(err, "Cannot delete Cassandra cluster expose service", - "cluster ID", cassandra.Status.ID, - "cluster name", cassandra.Spec.Name, + "cluster ID", c.Status.ID, + "cluster name", c.Spec.Name, ) return reconcile.Result{}, err } l.Info("Cluster has been deleted", - "cluster name", cassandra.Spec.Name, - "cluster ID", cassandra.Status.ID, - "kind", cassandra.Kind, - "api Version", cassandra.APIVersion) + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, + "kind", c.Kind, + "api Version", c.APIVersion) r.EventRecorder.Eventf( - cassandra, models.Normal, models.Deleted, + c, models.Normal, models.Deleted, "Cluster resource is deleted", ) return models.ExitReconcile, nil } -func (r *CassandraReconciler) startClusterStatusJob(cassandraCluster *v1beta1.Cassandra) error { - job := r.newWatchStatusJob(cassandraCluster) +func (r *CassandraReconciler) startClusterStatusJob(c *v1beta1.Cassandra) error { + job := r.newWatchStatusJob(c) - err := r.Scheduler.ScheduleJob(cassandraCluster.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) + err := r.Scheduler.ScheduleJob(c.GetJobID(scheduler.StatusChecker), scheduler.ClusterStatusInterval, job) if err != nil { return err } @@ -653,55 +750,66 @@ func (r *CassandraReconciler) startUsersCreationJob(cluster *v1beta1.Cassandra) return nil } -func (r *CassandraReconciler) newWatchStatusJob(cassandra *v1beta1.Cassandra) scheduler.Job { +func (r *CassandraReconciler) startClusterOnPremisesIPsJob(c *v1beta1.Cassandra, b *onPremisesBootstrap) error { + job := newWatchOnPremisesIPsJob(c.Kind, b) + + err := 
r.Scheduler.ScheduleJob(c.GetJobID(scheduler.OnPremisesIPsChecker), scheduler.ClusterStatusInterval, job) + if err != nil { + return err + } + + return nil +} + +func (r *CassandraReconciler) newWatchStatusJob(c *v1beta1.Cassandra) scheduler.Job { l := log.Log.WithValues("component", "CassandraStatusClusterJob") return func() error { - namespacedName := client.ObjectKeyFromObject(cassandra) - err := r.Get(context.Background(), namespacedName, cassandra) + namespacedName := client.ObjectKeyFromObject(c) + err := r.Get(context.Background(), namespacedName, c) if k8serrors.IsNotFound(err) { l.Info("Resource is not found in the k8s cluster. Closing Instaclustr status sync.", "namespaced name", namespacedName) - r.Scheduler.RemoveJob(cassandra.GetJobID(scheduler.BackupsChecker)) - r.Scheduler.RemoveJob(cassandra.GetJobID(scheduler.UserCreator)) - r.Scheduler.RemoveJob(cassandra.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(c.GetJobID(scheduler.BackupsChecker)) + r.Scheduler.RemoveJob(c.GetJobID(scheduler.UserCreator)) + r.Scheduler.RemoveJob(c.GetJobID(scheduler.StatusChecker)) return nil } - iData, err := r.API.GetCassandra(cassandra.Status.ID) + iData, err := r.API.GetCassandra(c.Status.ID) if err != nil { if errors.Is(err, instaclustr.NotFound) { - if cassandra.DeletionTimestamp != nil { - _, err = r.handleDeleteCluster(context.Background(), l, cassandra) + if c.DeletionTimestamp != nil { + _, err = r.handleDeleteCluster(context.Background(), l, c) return err } - return r.handleExternalDelete(context.Background(), cassandra) + return r.handleExternalDelete(context.Background(), c) } l.Error(err, "Cannot get cluster from the Instaclustr API", - "clusterID", cassandra.Status.ID) + "clusterID", c.Status.ID) return err } - iCassandra, err := cassandra.FromInstAPI(iData) + iCassandra, err := c.FromInstAPI(iData) if err != nil { l.Error(err, "Cannot convert cluster from the Instaclustr API", - "cluster name", cassandra.Spec.Name, - "cluster ID", cassandra.Status.ID, + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, ) return err } - if !areStatusesEqual(&iCassandra.Status.ClusterStatus, &cassandra.Status.ClusterStatus) { + if !areStatusesEqual(&iCassandra.Status.ClusterStatus, &c.Status.ClusterStatus) { l.Info("Updating cluster status", "status from Instaclustr", iCassandra.Status.ClusterStatus, - "status from k8s", cassandra.Status.ClusterStatus) + "status from k8s", c.Status.ClusterStatus) - areDCsEqual := areDataCentresEqual(iCassandra.Status.ClusterStatus.DataCentres, cassandra.Status.ClusterStatus.DataCentres) + areDCsEqual := areDataCentresEqual(iCassandra.Status.ClusterStatus.DataCentres, c.Status.ClusterStatus.DataCentres) - patch := cassandra.NewPatch() - cassandra.Status.ClusterStatus = iCassandra.Status.ClusterStatus - err = r.Status().Patch(context.Background(), cassandra, patch) + patch := c.NewPatch() + c.Status.ClusterStatus = iCassandra.Status.ClusterStatus + err = r.Status().Patch(context.Background(), c, patch) if err != nil { return err } @@ -714,9 +822,9 @@ func (r *CassandraReconciler) newWatchStatusJob(cassandra *v1beta1.Cassandra) sc } err = exposeservice.Create(r.Client, - cassandra.Name, - cassandra.Namespace, - cassandra.Spec.PrivateNetworkCluster, + c.Name, + c.Namespace, + c.Spec.PrivateNetworkCluster, nodes, models.CassandraConnectionPort) if err != nil { @@ -726,49 +834,49 @@ func (r *CassandraReconciler) newWatchStatusJob(cassandra *v1beta1.Cassandra) sc } if iCassandra.Status.CurrentClusterOperationStatus == models.NoOperation && - 
cassandra.Annotations[models.ResourceStateAnnotation] != models.UpdatingEvent && - cassandra.Annotations[models.UpdateQueuedAnnotation] != models.True && - !cassandra.Spec.IsEqual(iCassandra.Spec) { - l.Info(msgExternalChanges, "instaclustr data", iCassandra.Spec, "k8s resource spec", cassandra.Spec) + c.Annotations[models.ResourceStateAnnotation] != models.UpdatingEvent && + c.Annotations[models.UpdateQueuedAnnotation] != models.True && + !c.Spec.IsEqual(iCassandra.Spec) { + l.Info(msgExternalChanges, "instaclustr data", iCassandra.Spec, "k8s resource spec", c.Spec) - patch := cassandra.NewPatch() - cassandra.Annotations[models.ExternalChangesAnnotation] = models.True + patch := c.NewPatch() + c.Annotations[models.ExternalChangesAnnotation] = models.True - err = r.Patch(context.Background(), cassandra, patch) + err = r.Patch(context.Background(), c, patch) if err != nil { l.Error(err, "Cannot patch cluster cluster", - "cluster name", cassandra.Spec.Name, "cluster state", cassandra.Status.State) + "cluster name", c.Spec.Name, "cluster state", c.Status.State) return err } - msgDiffSpecs, err := createSpecDifferenceMessage(cassandra.Spec, iCassandra.Spec) + msgDiffSpecs, err := createSpecDifferenceMessage(c.Spec, iCassandra.Spec) if err != nil { l.Error(err, "Cannot create specification difference message", - "instaclustr data", iCassandra.Spec, "k8s resource spec", cassandra.Spec) + "instaclustr data", iCassandra.Spec, "k8s resource spec", c.Spec) return err } - r.EventRecorder.Eventf(cassandra, models.Warning, models.ExternalChanges, msgDiffSpecs) + r.EventRecorder.Eventf(c, models.Warning, models.ExternalChanges, msgDiffSpecs) } //TODO: change all context.Background() and context.TODO() to ctx from Reconcile - err = r.reconcileMaintenanceEvents(context.Background(), cassandra) + err = r.reconcileMaintenanceEvents(context.Background(), c) if err != nil { l.Error(err, "Cannot reconcile cluster maintenance events", - "cluster name", cassandra.Spec.Name, - "cluster ID", cassandra.Status.ID, + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, ) return err } - if cassandra.Status.State == models.RunningStatus && cassandra.Status.CurrentClusterOperationStatus == models.OperationInProgress { - patch := cassandra.NewPatch() - for _, dc := range cassandra.Status.DataCentres { + if c.Status.State == models.RunningStatus && c.Status.CurrentClusterOperationStatus == models.OperationInProgress { + patch := c.NewPatch() + for _, dc := range c.Status.DataCentres { resizeOperations, err := r.API.GetResizeOperationsByClusterDataCentreID(dc.ID) if err != nil { l.Error(err, "Cannot get data centre resize operations", - "cluster name", cassandra.Spec.Name, - "cluster ID", cassandra.Status.ID, + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, "data centre ID", dc.ID, ) @@ -776,11 +884,11 @@ func (r *CassandraReconciler) newWatchStatusJob(cassandra *v1beta1.Cassandra) sc } dc.ResizeOperations = resizeOperations - err = r.Status().Patch(context.Background(), cassandra, patch) + err = r.Status().Patch(context.Background(), c, patch) if err != nil { l.Error(err, "Cannot patch data centre resize operations", - "cluster name", cassandra.Spec.Name, - "cluster ID", cassandra.Status.ID, + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, "data centre ID", dc.ID, ) @@ -793,12 +901,12 @@ func (r *CassandraReconciler) newWatchStatusJob(cassandra *v1beta1.Cassandra) sc } } -func (r *CassandraReconciler) newWatchBackupsJob(cluster *v1beta1.Cassandra) scheduler.Job { +func (r *CassandraReconciler) 
newWatchBackupsJob(c *v1beta1.Cassandra) scheduler.Job { l := log.Log.WithValues("component", "cassandraBackupsClusterJob") return func() error { ctx := context.Background() - err := r.Get(ctx, types.NamespacedName{Namespace: cluster.Namespace, Name: cluster.Name}, cluster) + err := r.Get(ctx, types.NamespacedName{Namespace: c.Namespace, Name: c.Name}, c) if err != nil { if k8serrors.IsNotFound(err) { return nil @@ -807,11 +915,11 @@ func (r *CassandraReconciler) newWatchBackupsJob(cluster *v1beta1.Cassandra) sch return err } - iBackups, err := r.API.GetClusterBackups(cluster.Status.ID, models.ClusterKindsMap[cluster.Kind]) + iBackups, err := r.API.GetClusterBackups(c.Status.ID, models.ClusterKindsMap[c.Kind]) if err != nil { l.Error(err, "Cannot get cluster backups", - "cluster name", cluster.Spec.Name, - "cluster ID", cluster.Status.ID, + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, ) return err @@ -819,11 +927,11 @@ func (r *CassandraReconciler) newWatchBackupsJob(cluster *v1beta1.Cassandra) sch iBackupEvents := iBackups.GetBackupEvents(models.CassandraKind) - k8sBackupList, err := r.listClusterBackups(ctx, cluster.Status.ID, cluster.Namespace) + k8sBackupList, err := r.listClusterBackups(ctx, c.Status.ID, c.Namespace) if err != nil { l.Error(err, "Cannot list cluster backups", - "cluster name", cluster.Spec.Name, - "cluster ID", cluster.Status.ID, + "cluster name", c.Spec.Name, + "cluster ID", c.Status.ID, ) return err @@ -887,7 +995,7 @@ func (r *CassandraReconciler) newWatchBackupsJob(cluster *v1beta1.Cassandra) sch continue } - backupSpec := cluster.NewBackupSpec(start) + backupSpec := c.NewBackupSpec(start) err = r.Create(ctx, backupSpec) if err != nil { return err diff --git a/controllers/clusters/helpers.go b/controllers/clusters/helpers.go index 6ee2bb09f..e35ce6d4a 100644 --- a/controllers/clusters/helpers.go +++ b/controllers/clusters/helpers.go @@ -117,26 +117,31 @@ func isDataCentreNodesEqual(a, b []*v1beta1.Node) bool { if a == nil && b == nil { return true } - if len(a) != len(b) { return false } for i := range a { - if a[i].ID != b[i].ID { - continue - } + var eq bool + for j := range b { + if a[i].ID != b[j].ID { + continue + } - if a[i].Size != b[i].Size || - a[i].PublicAddress != b[i].PublicAddress || - a[i].PrivateAddress != b[i].PrivateAddress || - a[i].Status != b[i].Status || - !slices.Equal(a[i].Roles, b[i].Roles) || - a[i].Rack != b[i].Rack { + if a[i].Size != b[j].Size || + a[i].PublicAddress != b[j].PublicAddress || + a[i].PrivateAddress != b[j].PrivateAddress || + a[i].Status != b[j].Status || + !slices.Equal(a[i].Roles, b[j].Roles) || + a[i].Rack != b[j].Rack { + return false + } + eq = true + } + if !eq { return false } } - return true } diff --git a/controllers/clusters/kafka_controller.go b/controllers/clusters/kafka_controller.go index d076e73c0..9acd59b04 100644 --- a/controllers/clusters/kafka_controller.go +++ b/controllers/clusters/kafka_controller.go @@ -57,6 +57,14 @@ type KafkaReconciler struct { //+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=kafkas/status,verbs=get;update;patch //+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=kafkas/finalizers,verbs=update //+kubebuilder:rbac:groups="",resources=events,verbs=create;patch +//+kubebuilder:rbac:groups=cdi.kubevirt.io,resources=datavolumes,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachines,verbs=get;list;watch;create;update;patch;delete;deletecollection 
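+// The RBAC markers above and below are new with on-premises support: the
+// operator now provisions cluster nodes as KubeVirt VirtualMachines backed by
+// CDI DataVolumes, and manages the surrounding core Services,
+// PersistentVolumeClaims, Pods, and Secrets (see on_premises.go in this patch).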
+//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachineinstances,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete;deletecollection
+//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete;deletecollection

 // Reconcile is part of the main kubernetes reconciliation loop which aims to
 // move the current state of the cluster closer to the desired state.
@@ -66,8 +74,8 @@ type KafkaReconciler struct {
 func (r *KafkaReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
 	l := log.FromContext(ctx)

-	var kafka v1beta1.Kafka
-	err := r.Client.Get(ctx, req.NamespacedName, &kafka)
+	var k v1beta1.Kafka
+	err := r.Client.Get(ctx, req.NamespacedName, &k)
 	if err != nil {
 		if k8serrors.IsNotFound(err) {
 			l.Info("Kafka custom resource is not found", "namespaced name ", req.NamespacedName)
@@ -78,39 +86,39 @@
 		return reconcile.Result{}, err
 	}

-	switch kafka.Annotations[models.ResourceStateAnnotation] {
+	switch k.Annotations[models.ResourceStateAnnotation] {
 	case models.CreatingEvent:
-		return r.handleCreateCluster(ctx, &kafka, l)
+		return r.handleCreateCluster(ctx, &k, l)
 	case models.UpdatingEvent:
-		return r.handleUpdateCluster(ctx, &kafka, l)
+		return r.handleUpdateCluster(ctx, &k, l)
 	case models.DeletingEvent:
-		return r.handleDeleteCluster(ctx, &kafka, l)
+		return r.handleDeleteCluster(ctx, &k, l)
 	case models.GenericEvent:
-		l.Info("Event isn't handled", "cluster name", kafka.Spec.Name, "request", req,
-			"event", kafka.Annotations[models.ResourceStateAnnotation])
+		l.Info("Event isn't handled", "cluster name", k.Spec.Name, "request", req,
+			"event", k.Annotations[models.ResourceStateAnnotation])
 		return models.ExitReconcile, nil
 	}

 	return models.ExitReconcile, nil
 }

-func (r *KafkaReconciler) handleCreateCluster(ctx context.Context, kafka *v1beta1.Kafka, l logr.Logger) (reconcile.Result, error) {
+func (r *KafkaReconciler) handleCreateCluster(ctx context.Context, k *v1beta1.Kafka, l logr.Logger) (reconcile.Result, error) {
 	l = l.WithName("Kafka creation Event")

 	var err error
-	if kafka.Status.ID == "" {
+	if k.Status.ID == "" {
 		l.Info("Creating cluster",
-			"cluster name", kafka.Spec.Name,
-			"data centres", kafka.Spec.DataCentres)
+			"cluster name", k.Spec.Name,
+			"data centres", k.Spec.DataCentres)

-		patch := kafka.NewPatch()
-		kafka.Status.ID, err = r.API.CreateCluster(instaclustr.KafkaEndpoint, kafka.Spec.ToInstAPI())
+		patch := k.NewPatch()
+		k.Status.ID, err = r.API.CreateCluster(instaclustr.KafkaEndpoint, k.Spec.ToInstAPI())
 		if err != nil {
 			l.Error(err, "Cannot create cluster",
-				"spec", kafka.Spec,
+				"spec", k.Spec,
 			)
 			r.EventRecorder.Eventf(
-				kafka, models.Warning, models.CreationFailed,
+				k, models.Warning, models.CreationFailed,
 				"Cluster creation on the Instaclustr is failed. 
Reason: %v", err, ) @@ -118,33 +126,33 @@ func (r *KafkaReconciler) handleCreateCluster(ctx context.Context, kafka *v1beta } r.EventRecorder.Eventf( - kafka, models.Normal, models.Created, + k, models.Normal, models.Created, "Cluster creation request is sent. Cluster ID: %s", - kafka.Status.ID, + k.Status.ID, ) - err = r.Status().Patch(ctx, kafka, patch) + err = r.Status().Patch(ctx, k, patch) if err != nil { l.Error(err, "Cannot patch cluster status", - "spec", kafka.Spec, + "spec", k.Spec, ) r.EventRecorder.Eventf( - kafka, models.Warning, models.PatchFailed, + k, models.Warning, models.PatchFailed, "Cluster resource status patch is failed. Reason: %v", err, ) return reconcile.Result{}, err } - kafka.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent - controllerutil.AddFinalizer(kafka, models.DeletionFinalizer) - err = r.Patch(ctx, kafka, patch) + k.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent + controllerutil.AddFinalizer(k, models.DeletionFinalizer) + err = r.Patch(ctx, k, patch) if err != nil { l.Error(err, "Cannot patch cluster resource", - "name", kafka.Spec.Name, + "name", k.Spec.Name, ) r.EventRecorder.Eventf( - kafka, models.Warning, models.PatchFailed, + k, models.Warning, models.PatchFailed, "Cluster resource patch is failed. Reason: %v", err, ) @@ -152,17 +160,17 @@ func (r *KafkaReconciler) handleCreateCluster(ctx context.Context, kafka *v1beta } l.Info("Cluster has been created", - "cluster ID", kafka.Status.ID, + "cluster ID", k.Status.ID, ) } - if kafka.Status.State != models.DeletedStatus { - err = r.startClusterStatusJob(kafka) + if k.Status.State != models.DeletedStatus { + err = r.startClusterStatusJob(k) if err != nil { l.Error(err, "Cannot start cluster status job", - "cluster ID", kafka.Status.ID) + "cluster ID", k.Status.ID) r.EventRecorder.Eventf( - kafka, models.Warning, models.CreationFailed, + k, models.Warning, models.CreationFailed, "Cluster status check job creation is failed. Reason: %v", err, ) @@ -170,24 +178,102 @@ func (r *KafkaReconciler) handleCreateCluster(ctx context.Context, kafka *v1beta } r.EventRecorder.Eventf( - kafka, models.Normal, models.Created, + k, models.Normal, models.Created, "Cluster status check job is started", ) - if kafka.Spec.UserRefs != nil { - err = r.startUsersCreationJob(kafka) + if k.Spec.UserRefs != nil { + err = r.startUsersCreationJob(k) if err != nil { l.Error(err, "Failed to start user creation job") - r.EventRecorder.Eventf(kafka, models.Warning, models.CreationFailed, + r.EventRecorder.Eventf(k, models.Warning, models.CreationFailed, "User creation job is failed. Reason: %v", err, ) return reconcile.Result{}, err } - r.EventRecorder.Event(kafka, models.Normal, models.Created, + r.EventRecorder.Event(k, models.Normal, models.Created, "Cluster user creation job is started", ) } + + if k.Spec.OnPremisesSpec != nil { + iData, err := r.API.GetKafka(k.Status.ID) + if err != nil { + l.Error(err, "Cannot get cluster from the Instaclustr API", + "cluster name", k.Spec.Name, + "data centres", k.Spec.DataCentres, + "cluster ID", k.Status.ID, + ) + r.EventRecorder.Eventf( + k, models.Warning, models.FetchFailed, + "Cluster fetch from the Instaclustr API is failed. 
Reason: %v", + err, + ) + return reconcile.Result{}, err + } + iKafka, err := k.FromInstAPI(iData) + if err != nil { + l.Error( + err, "Cannot convert cluster from the Instaclustr API", + "cluster name", k.Spec.Name, + "cluster ID", k.Status.ID, + ) + r.EventRecorder.Eventf( + k, models.Warning, models.ConversionFailed, + "Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + bootstrap := newOnPremisesBootstrap( + r.Client, + k, + r.EventRecorder, + iKafka.Status.ClusterStatus, + k.Spec.OnPremisesSpec, + newExposePorts(k.GetExposePorts()), + k.GetHeadlessPorts(), + k.Spec.PrivateNetworkCluster, + ) + + err = handleCreateOnPremisesClusterResources(ctx, bootstrap) + if err != nil { + l.Error( + err, "Cannot create resources for on-premises cluster", + "cluster spec", k.Spec.OnPremisesSpec, + ) + r.EventRecorder.Eventf( + k, models.Warning, models.CreationFailed, + "Resources creation for on-premises cluster is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + err = r.startClusterOnPremisesIPsJob(k, bootstrap) + if err != nil { + l.Error(err, "Cannot start on-premises cluster IPs check job", + "cluster ID", k.Status.ID, + ) + + r.EventRecorder.Eventf( + k, models.Warning, models.CreationFailed, + "On-premises cluster IPs check job is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + l.Info( + "On-premises resources have been created", + "cluster name", k.Spec.Name, + "on-premises Spec", k.Spec.OnPremisesSpec, + "cluster ID", k.Status.ID, + ) + return models.ExitReconcile, nil + } } return models.ExitReconcile, nil @@ -212,7 +298,7 @@ func (r *KafkaReconciler) handleUpdateCluster( return reconcile.Result{}, err } - if iKafka.Status.ClusterStatus.State != StatusRUNNING { + if iKafka.Status.ClusterStatus.State != models.RunningStatus { l.Error(instaclustr.ClusterNotRunning, "Unable to update cluster, cluster still not running", "cluster name", k.Spec.Name, "cluster state", iKafka.Status.ClusterStatus.State) @@ -359,35 +445,35 @@ func (r *KafkaReconciler) handleExternalChanges(k, ik *v1beta1.Kafka, l logr.Log return models.ExitReconcile, nil } -func (r *KafkaReconciler) handleDeleteCluster(ctx context.Context, kafka *v1beta1.Kafka, l logr.Logger) (reconcile.Result, error) { +func (r *KafkaReconciler) handleDeleteCluster(ctx context.Context, k *v1beta1.Kafka, l logr.Logger) (reconcile.Result, error) { l = l.WithName("Kafka deletion Event") - _, err := r.API.GetKafka(kafka.Status.ID) + _, err := r.API.GetKafka(k.Status.ID) if err != nil && !errors.Is(err, instaclustr.NotFound) { l.Error(err, "Cannot get cluster from the Instaclustr API", - "cluster name", kafka.Spec.Name, - "cluster state", kafka.Status.ClusterStatus.State) + "cluster name", k.Spec.Name, + "cluster state", k.Status.ClusterStatus.State) r.EventRecorder.Eventf( - kafka, models.Warning, models.FetchFailed, + k, models.Warning, models.FetchFailed, "Cluster resource fetch from the Instaclustr API is failed. 
Reason: %v", err, ) return reconcile.Result{}, err } - patch := kafka.NewPatch() + patch := k.NewPatch() if !errors.Is(err, instaclustr.NotFound) { l.Info("Sending cluster deletion to the Instaclustr API", - "cluster name", kafka.Spec.Name, - "cluster ID", kafka.Status.ID) + "cluster name", k.Spec.Name, + "cluster ID", k.Status.ID) - err = r.API.DeleteCluster(kafka.Status.ID, instaclustr.KafkaEndpoint) + err = r.API.DeleteCluster(k.Status.ID, instaclustr.KafkaEndpoint) if err != nil { l.Error(err, "Cannot delete cluster", - "cluster name", kafka.Spec.Name, - "cluster state", kafka.Status.ClusterStatus.State) + "cluster name", k.Spec.Name, + "cluster state", k.Status.ClusterStatus.State) r.EventRecorder.Eventf( - kafka, models.Warning, models.DeletionFailed, + k, models.Warning, models.DeletionFailed, "Cluster deletion is failed on the Instaclustr. Reason: %v", err, ) @@ -395,82 +481,110 @@ func (r *KafkaReconciler) handleDeleteCluster(ctx context.Context, kafka *v1beta } r.EventRecorder.Eventf( - kafka, models.Normal, models.DeletionStarted, + k, models.Normal, models.DeletionStarted, "Cluster deletion request is sent to the Instaclustr API.", ) - if kafka.Spec.TwoFactorDelete != nil { - kafka.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent - kafka.Annotations[models.ClusterDeletionAnnotation] = models.Triggered - err = r.Patch(ctx, kafka, patch) + if k.Spec.TwoFactorDelete != nil { + k.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent + k.Annotations[models.ClusterDeletionAnnotation] = models.Triggered + err = r.Patch(ctx, k, patch) if err != nil { l.Error(err, "Cannot patch cluster resource", - "cluster name", kafka.Spec.Name, - "cluster state", kafka.Status.State) + "cluster name", k.Spec.Name, + "cluster state", k.Status.State) r.EventRecorder.Eventf( - kafka, models.Warning, models.PatchFailed, + k, models.Warning, models.PatchFailed, "Cluster resource patch is failed. Reason: %v", err, ) return reconcile.Result{}, err } - l.Info(msgDeleteClusterWithTwoFactorDelete, "cluster ID", kafka.Status.ID) + l.Info(msgDeleteClusterWithTwoFactorDelete, "cluster ID", k.Status.ID) - r.EventRecorder.Event(kafka, models.Normal, models.DeletionStarted, + r.EventRecorder.Event(k, models.Normal, models.DeletionStarted, "Two-Factor Delete is enabled, please confirm cluster deletion via email or phone.") return models.ExitReconcile, nil } } - err = detachUsers(ctx, r.Client, r, kafka) + err = detachUsers(ctx, r.Client, r, k) if err != nil { l.Error(err, "Failed to detach users from the cluster") - r.EventRecorder.Eventf(kafka, models.Warning, models.DeletionFailed, + r.EventRecorder.Eventf(k, models.Warning, models.DeletionFailed, "Detaching users from the cluster is failed. Reason: %w", err, ) return reconcile.Result{}, err } - r.Scheduler.RemoveJob(kafka.GetJobID(scheduler.StatusChecker)) - r.Scheduler.RemoveJob(kafka.GetJobID(scheduler.UserCreator)) - controllerutil.RemoveFinalizer(kafka, models.DeletionFinalizer) - kafka.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent - err = r.Patch(ctx, kafka, patch) + if k.Spec.OnPremisesSpec != nil { + err = deleteOnPremResources(ctx, r.Client, k.Status.ID, k.Namespace) + if err != nil { + l.Error(err, "Cannot delete cluster on-premises resources", + "cluster ID", k.Status.ID) + r.EventRecorder.Eventf(k, models.Warning, models.DeletionFailed, + "Cluster on-premises resources deletion is failed. 
Reason: %v", err) + return reconcile.Result{}, err + } + + l.Info("Cluster on-premises resources are deleted", + "cluster ID", k.Status.ID) + r.EventRecorder.Eventf(k, models.Normal, models.Deleted, + "Cluster on-premises resources are deleted") + r.Scheduler.RemoveJob(k.GetJobID(scheduler.OnPremisesIPsChecker)) + } + + r.Scheduler.RemoveJob(k.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(k.GetJobID(scheduler.UserCreator)) + controllerutil.RemoveFinalizer(k, models.DeletionFinalizer) + k.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent + err = r.Patch(ctx, k, patch) if err != nil { l.Error(err, "Cannot patch cluster resource", - "cluster name", kafka.Spec.Name) + "cluster name", k.Spec.Name) r.EventRecorder.Eventf( - kafka, models.Warning, models.PatchFailed, + k, models.Warning, models.PatchFailed, "Cluster resource patch is failed. Reason: %v", err, ) return reconcile.Result{}, err } - err = exposeservice.Delete(r.Client, kafka.Name, kafka.Namespace) + err = exposeservice.Delete(r.Client, k.Name, k.Namespace) if err != nil { l.Error(err, "Cannot delete Kafka cluster expose service", - "cluster ID", kafka.Status.ID, - "cluster name", kafka.Spec.Name, + "cluster ID", k.Status.ID, + "cluster name", k.Spec.Name, ) return reconcile.Result{}, err } l.Info("Cluster was deleted", - "cluster name", kafka.Spec.Name, - "cluster ID", kafka.Status.ID) + "cluster name", k.Spec.Name, + "cluster ID", k.Status.ID) r.EventRecorder.Eventf( - kafka, models.Normal, models.Deleted, + k, models.Normal, models.Deleted, "Cluster resource is deleted", ) return models.ExitReconcile, nil } +func (r *KafkaReconciler) startClusterOnPremisesIPsJob(k *v1beta1.Kafka, b *onPremisesBootstrap) error { + job := newWatchOnPremisesIPsJob(k.Kind, b) + + err := r.Scheduler.ScheduleJob(k.GetJobID(scheduler.OnPremisesIPsChecker), scheduler.ClusterStatusInterval, job) + if err != nil { + return err + } + + return nil +} + func (r *KafkaReconciler) startClusterStatusJob(kafka *v1beta1.Kafka) error { job := r.newWatchStatusJob(kafka) @@ -482,61 +596,61 @@ func (r *KafkaReconciler) startClusterStatusJob(kafka *v1beta1.Kafka) error { return nil } -func (r *KafkaReconciler) newWatchStatusJob(kafka *v1beta1.Kafka) scheduler.Job { +func (r *KafkaReconciler) newWatchStatusJob(k *v1beta1.Kafka) scheduler.Job { l := log.Log.WithValues("component", "kafkaStatusClusterJob") return func() error { - namespacedName := client.ObjectKeyFromObject(kafka) - err := r.Get(context.Background(), namespacedName, kafka) + namespacedName := client.ObjectKeyFromObject(k) + err := r.Get(context.Background(), namespacedName, k) if k8serrors.IsNotFound(err) { l.Info("Resource is not found in the k8s cluster. 
Closing Instaclustr status sync.", "namespaced name", namespacedName) - r.Scheduler.RemoveJob(kafka.GetJobID(scheduler.StatusChecker)) - r.Scheduler.RemoveJob(kafka.GetJobID(scheduler.UserCreator)) - r.Scheduler.RemoveJob(kafka.GetJobID(scheduler.BackupsChecker)) + r.Scheduler.RemoveJob(k.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(k.GetJobID(scheduler.UserCreator)) + r.Scheduler.RemoveJob(k.GetJobID(scheduler.BackupsChecker)) return nil } if err != nil { l.Error(err, "Cannot get cluster resource", - "resource name", kafka.Name) + "resource name", k.Name) return err } - iData, err := r.API.GetKafka(kafka.Status.ID) + iData, err := r.API.GetKafka(k.Status.ID) if err != nil { if errors.Is(err, instaclustr.NotFound) { - if kafka.DeletionTimestamp != nil { - _, err = r.handleDeleteCluster(context.Background(), kafka, l) + if k.DeletionTimestamp != nil { + _, err = r.handleDeleteCluster(context.Background(), k, l) return err } - return r.handleExternalDelete(context.Background(), kafka) + return r.handleExternalDelete(context.Background(), k) } - l.Error(err, "Cannot get cluster from the Instaclustr", "cluster ID", kafka.Status.ID) + l.Error(err, "Cannot get cluster from the Instaclustr", "cluster ID", k.Status.ID) return err } - iKafka, err := kafka.FromInstAPI(iData) + iKafka, err := k.FromInstAPI(iData) if err != nil { l.Error(err, "Cannot convert cluster from the Instaclustr API", - "cluster ID", kafka.Status.ID, + "cluster ID", k.Status.ID, ) return err } - if !areStatusesEqual(&kafka.Status.ClusterStatus, &iKafka.Status.ClusterStatus) { + if !areStatusesEqual(&k.Status.ClusterStatus, &iKafka.Status.ClusterStatus) { l.Info("Kafka status of k8s is different from Instaclustr. Reconcile k8s resource status..", "instacluster status", iKafka.Status, - "k8s status", kafka.Status.ClusterStatus) + "k8s status", k.Status.ClusterStatus) - areDCsEqual := areDataCentresEqual(iKafka.Status.ClusterStatus.DataCentres, kafka.Status.ClusterStatus.DataCentres) + areDCsEqual := areDataCentresEqual(iKafka.Status.ClusterStatus.DataCentres, k.Status.ClusterStatus.DataCentres) - patch := kafka.NewPatch() - kafka.Status.ClusterStatus = iKafka.Status.ClusterStatus - err = r.Status().Patch(context.Background(), kafka, patch) + patch := k.NewPatch() + k.Status.ClusterStatus = iKafka.Status.ClusterStatus + err = r.Status().Patch(context.Background(), k, patch) if err != nil { l.Error(err, "Cannot patch cluster cluster", - "cluster name", kafka.Spec.Name, "cluster state", kafka.Status.State) + "cluster name", k.Spec.Name, "cluster state", k.Status.State) return err } @@ -548,9 +662,9 @@ func (r *KafkaReconciler) newWatchStatusJob(kafka *v1beta1.Kafka) scheduler.Job } err = exposeservice.Create(r.Client, - kafka.Name, - kafka.Namespace, - kafka.Spec.PrivateNetworkCluster, + k.Name, + k.Namespace, + k.Spec.PrivateNetworkCluster, nodes, models.KafkaConnectionPort) if err != nil { @@ -560,49 +674,49 @@ func (r *KafkaReconciler) newWatchStatusJob(kafka *v1beta1.Kafka) scheduler.Job } if iKafka.Status.CurrentClusterOperationStatus == models.NoOperation && - kafka.Annotations[models.UpdateQueuedAnnotation] != models.True && - !kafka.Spec.IsEqual(iKafka.Spec) { + k.Annotations[models.UpdateQueuedAnnotation] != models.True && + !k.Spec.IsEqual(iKafka.Spec) { - patch := kafka.NewPatch() - kafka.Annotations[models.ExternalChangesAnnotation] = models.True + patch := k.NewPatch() + k.Annotations[models.ExternalChangesAnnotation] = models.True - err = r.Patch(context.Background(), kafka, patch) + err = 
r.Patch(context.Background(), k, patch) if err != nil { l.Error(err, "Cannot patch cluster cluster", - "cluster name", kafka.Spec.Name, "cluster state", kafka.Status.State) + "cluster name", k.Spec.Name, "cluster state", k.Status.State) return err } l.Info("The k8s specification is different from Instaclustr Console. Update operations are blocked.", - "instaclustr data", iKafka.Spec, "k8s resource spec", kafka.Spec) + "instaclustr data", iKafka.Spec, "k8s resource spec", k.Spec) - msgDiffSpecs, err := createSpecDifferenceMessage(kafka.Spec, iKafka.Spec) + msgDiffSpecs, err := createSpecDifferenceMessage(k.Spec, iKafka.Spec) if err != nil { l.Error(err, "Cannot create specification difference message", - "instaclustr data", iKafka.Spec, "k8s resource spec", kafka.Spec) + "instaclustr data", iKafka.Spec, "k8s resource spec", k.Spec) return err } - r.EventRecorder.Eventf(kafka, models.Warning, models.ExternalChanges, msgDiffSpecs) + r.EventRecorder.Eventf(k, models.Warning, models.ExternalChanges, msgDiffSpecs) } //TODO: change all context.Background() and context.TODO() to ctx from Reconcile - err = r.reconcileMaintenanceEvents(context.Background(), kafka) + err = r.reconcileMaintenanceEvents(context.Background(), k) if err != nil { l.Error(err, "Cannot reconcile cluster maintenance events", - "cluster name", kafka.Spec.Name, - "cluster ID", kafka.Status.ID, + "cluster name", k.Spec.Name, + "cluster ID", k.Status.ID, ) return err } - if kafka.Status.State == models.RunningStatus && kafka.Status.CurrentClusterOperationStatus == models.OperationInProgress { - patch := kafka.NewPatch() - for _, dc := range kafka.Status.DataCentres { + if k.Status.State == models.RunningStatus && k.Status.CurrentClusterOperationStatus == models.OperationInProgress { + patch := k.NewPatch() + for _, dc := range k.Status.DataCentres { resizeOperations, err := r.API.GetResizeOperationsByClusterDataCentreID(dc.ID) if err != nil { l.Error(err, "Cannot get data centre resize operations", - "cluster name", kafka.Spec.Name, - "cluster ID", kafka.Status.ID, + "cluster name", k.Spec.Name, + "cluster ID", k.Status.ID, "data centre ID", dc.ID, ) @@ -610,11 +724,11 @@ func (r *KafkaReconciler) newWatchStatusJob(kafka *v1beta1.Kafka) scheduler.Job } dc.ResizeOperations = resizeOperations - err = r.Status().Patch(context.Background(), kafka, patch) + err = r.Status().Patch(context.Background(), k, patch) if err != nil { l.Error(err, "Cannot patch data centre resize operations", - "cluster name", kafka.Spec.Name, - "cluster ID", kafka.Status.ID, + "cluster name", k.Spec.Name, + "cluster ID", k.Status.ID, "data centre ID", dc.ID, ) @@ -627,25 +741,25 @@ func (r *KafkaReconciler) newWatchStatusJob(kafka *v1beta1.Kafka) scheduler.Job } } -func (r *KafkaReconciler) startUsersCreationJob(kafka *v1beta1.Kafka) error { - job := r.newUsersCreationJob(kafka) +func (r *KafkaReconciler) startUsersCreationJob(k *v1beta1.Kafka) error { + job := r.newUsersCreationJob(k) - err := r.Scheduler.ScheduleJob(kafka.GetJobID(scheduler.UserCreator), scheduler.UserCreationInterval, job) + err := r.Scheduler.ScheduleJob(k.GetJobID(scheduler.UserCreator), scheduler.UserCreationInterval, job) if err != nil { return err } return nil } -func (r *KafkaReconciler) newUsersCreationJob(kafka *v1beta1.Kafka) scheduler.Job { +func (r *KafkaReconciler) newUsersCreationJob(k *v1beta1.Kafka) scheduler.Job { l := log.Log.WithValues("component", "kafkaUsersCreationJob") return func() error { ctx := context.Background() err := r.Get(ctx, types.NamespacedName{ - 
Namespace: kafka.Namespace, - Name: kafka.Name, - }, kafka) + Namespace: k.Namespace, + Name: k.Name, + }, k) if err != nil { if k8serrors.IsNotFound(err) { @@ -655,50 +769,50 @@ func (r *KafkaReconciler) newUsersCreationJob(kafka *v1beta1.Kafka) scheduler.Jo return err } - if kafka.Status.State != models.RunningStatus { + if k.Status.State != models.RunningStatus { l.Info("User creation job is scheduled") - r.EventRecorder.Eventf(kafka, models.Normal, models.CreationFailed, + r.EventRecorder.Eventf(k, models.Normal, models.CreationFailed, "User creation job is scheduled, cluster is not in the running state", ) return nil } - err = handleUsersChanges(ctx, r.Client, r, kafka) + err = handleUsersChanges(ctx, r.Client, r, k) if err != nil { l.Error(err, "Failed to create users for the cluster") - r.EventRecorder.Eventf(kafka, models.Warning, models.CreationFailed, + r.EventRecorder.Eventf(k, models.Warning, models.CreationFailed, "Failed to create users for the cluster. Reason: %v", err) return err } l.Info("User creation job successfully finished") - r.EventRecorder.Eventf(kafka, models.Normal, models.Created, + r.EventRecorder.Eventf(k, models.Normal, models.Created, "User creation job successfully finished", ) - r.Scheduler.RemoveJob(kafka.GetJobID(scheduler.UserCreator)) + r.Scheduler.RemoveJob(k.GetJobID(scheduler.UserCreator)) return nil } } -func (r *KafkaReconciler) handleExternalDelete(ctx context.Context, kafka *v1beta1.Kafka) error { +func (r *KafkaReconciler) handleExternalDelete(ctx context.Context, k *v1beta1.Kafka) error { l := log.FromContext(ctx) - patch := kafka.NewPatch() - kafka.Status.State = models.DeletedStatus - err := r.Status().Patch(ctx, kafka, patch) + patch := k.NewPatch() + k.Status.State = models.DeletedStatus + err := r.Status().Patch(ctx, k, patch) if err != nil { return err } l.Info(instaclustr.MsgInstaclustrResourceNotFound) - r.EventRecorder.Eventf(kafka, models.Warning, models.ExternalDeleted, instaclustr.MsgInstaclustrResourceNotFound) + r.EventRecorder.Eventf(k, models.Warning, models.ExternalDeleted, instaclustr.MsgInstaclustrResourceNotFound) - r.Scheduler.RemoveJob(kafka.GetJobID(scheduler.BackupsChecker)) - r.Scheduler.RemoveJob(kafka.GetJobID(scheduler.UserCreator)) - r.Scheduler.RemoveJob(kafka.GetJobID(scheduler.StatusChecker)) + r.Scheduler.RemoveJob(k.GetJobID(scheduler.BackupsChecker)) + r.Scheduler.RemoveJob(k.GetJobID(scheduler.UserCreator)) + r.Scheduler.RemoveJob(k.GetJobID(scheduler.StatusChecker)) return nil } diff --git a/controllers/clusters/kafkaconnect_controller.go b/controllers/clusters/kafkaconnect_controller.go index 07879d330..e09149ac0 100644 --- a/controllers/clusters/kafkaconnect_controller.go +++ b/controllers/clusters/kafkaconnect_controller.go @@ -55,6 +55,14 @@ type KafkaConnectReconciler struct { //+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=kafkaconnects/status,verbs=get;update;patch //+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=kafkaconnects/finalizers,verbs=update //+kubebuilder:rbac:groups="",resources=events,verbs=create;patch +//+kubebuilder:rbac:groups=cdi.kubevirt.io,resources=datavolumes,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachines,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachineinstances,verbs=get;list;watch;create;update;patch;delete;deletecollection 
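+// As in kafka_controller.go, the markers below grant access to the KubeVirt/CDI
+// and core resources used by the on-premises provisioning in on_premises.go.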
+//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete;deletecollection // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. @@ -64,8 +72,8 @@ type KafkaConnectReconciler struct { func (r *KafkaConnectReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { l := log.FromContext(ctx) - kafkaConnect := &v1beta1.KafkaConnect{} - err := r.Client.Get(ctx, req.NamespacedName, kafkaConnect) + kc := &v1beta1.KafkaConnect{} + err := r.Client.Get(ctx, req.NamespacedName, kc) if err != nil { if k8serrors.IsNotFound(err) { l.Error(err, "KafkaConnect resource is not found", "request", req) @@ -76,16 +84,16 @@ func (r *KafkaConnectReconciler) Reconcile(ctx context.Context, req ctrl.Request return reconcile.Result{}, err } - switch kafkaConnect.Annotations[models.ResourceStateAnnotation] { + switch kc.Annotations[models.ResourceStateAnnotation] { case models.CreatingEvent: - return r.handleCreateCluster(ctx, kafkaConnect, l) + return r.handleCreateCluster(ctx, kc, l) case models.UpdatingEvent: - return r.handleUpdateCluster(ctx, kafkaConnect, l) + return r.handleUpdateCluster(ctx, kc, l) case models.DeletingEvent: - return r.handleDeleteCluster(ctx, kafkaConnect, l) + return r.handleDeleteCluster(ctx, kc, l) default: - l.Info("Event isn't handled", "cluster name", kafkaConnect.Spec.Name, - "request", req, "event", kafkaConnect.Annotations[models.ResourceStateAnnotation]) + l.Info("Event isn't handled", "cluster name", kc.Spec.Name, + "request", req, "event", kc.Annotations[models.ResourceStateAnnotation]) return models.ExitReconcile, nil } } @@ -178,6 +186,83 @@ func (r *KafkaConnectReconciler) handleCreateCluster(ctx context.Context, kc *v1 "Cluster status check job is started", ) } + if kc.Spec.OnPremisesSpec != nil { + iData, err := r.API.GetKafkaConnect(kc.Status.ID) + if err != nil { + l.Error(err, "Cannot get cluster from the Instaclustr API", + "cluster name", kc.Spec.Name, + "data centres", kc.Spec.DataCentres, + "cluster ID", kc.Status.ID, + ) + r.EventRecorder.Eventf( + kc, models.Warning, models.FetchFailed, + "Cluster fetch from the Instaclustr API is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + iKafkaConnect, err := kc.FromInst(iData) + if err != nil { + l.Error( + err, "Cannot convert cluster from the Instaclustr API", + "cluster name", kc.Spec.Name, + "cluster ID", kc.Status.ID, + ) + r.EventRecorder.Eventf( + kc, models.Warning, models.ConversionFailed, + "Cluster convertion from the Instaclustr API to k8s resource is failed. 
Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + bootstrap := newOnPremisesBootstrap( + r.Client, + kc, + r.EventRecorder, + iKafkaConnect.Status.ClusterStatus, + kc.Spec.OnPremisesSpec, + newExposePorts(kc.GetExposePorts()), + kc.GetHeadlessPorts(), + kc.Spec.PrivateNetworkCluster, + ) + + err = handleCreateOnPremisesClusterResources(ctx, bootstrap) + if err != nil { + l.Error( + err, "Cannot create resources for on-premises cluster", + "cluster spec", kc.Spec.OnPremisesSpec, + ) + r.EventRecorder.Eventf( + kc, models.Warning, models.CreationFailed, + "Resources creation for on-premises cluster is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + err = r.startClusterOnPremisesIPsJob(kc, bootstrap) + if err != nil { + l.Error(err, "Cannot start on-premises cluster IPs check job", + "cluster ID", kc.Status.ID, + ) + + r.EventRecorder.Eventf( + kc, models.Warning, models.CreationFailed, + "On-premises cluster IPs check job is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + l.Info( + "On-premises resources have been created", + "cluster name", kc.Spec.Name, + "on-premises Spec", kc.Spec.OnPremisesSpec, + "cluster ID", kc.Status.ID, + ) + return models.ExitReconcile, nil + } return models.ExitReconcile, nil } @@ -202,7 +287,7 @@ func (r *KafkaConnectReconciler) handleUpdateCluster(ctx context.Context, kc *v1 l.Error(err, "Cannot convert Kafka Connect from Instaclustr", "ClusterID", kc.Status.ID) r.EventRecorder.Eventf( - kc, models.Warning, models.ConvertionFailed, + kc, models.Warning, models.ConversionFailed, "Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v", err, ) @@ -292,39 +377,39 @@ func (r *KafkaConnectReconciler) handleUpdateCluster(ctx context.Context, kc *v1 return models.ExitReconcile, nil } -func (r *KafkaConnectReconciler) handleExternalChanges(k, ik *v1beta1.KafkaConnect, l logr.Logger) (reconcile.Result, error) { - if !k.Spec.IsEqual(ik.Spec) { +func (r *KafkaConnectReconciler) handleExternalChanges(kc, ik *v1beta1.KafkaConnect, l logr.Logger) (reconcile.Result, error) { + if !kc.Spec.IsEqual(ik.Spec) { l.Info(msgSpecStillNoMatch, - "specification of k8s resource", k.Spec, + "specification of k8s resource", kc.Spec, "data from Instaclustr ", ik.Spec) - msgDiffSpecs, err := createSpecDifferenceMessage(k.Spec, ik.Spec) + msgDiffSpecs, err := createSpecDifferenceMessage(kc.Spec, ik.Spec) if err != nil { l.Error(err, "Cannot create specification difference message", - "instaclustr data", ik.Spec, "k8s resource spec", k.Spec) + "instaclustr data", ik.Spec, "k8s resource spec", kc.Spec) return models.ExitReconcile, nil } - r.EventRecorder.Eventf(k, models.Warning, models.ExternalChanges, msgDiffSpecs) + r.EventRecorder.Eventf(kc, models.Warning, models.ExternalChanges, msgDiffSpecs) return models.ExitReconcile, nil } - patch := k.NewPatch() + patch := kc.NewPatch() - k.Annotations[models.ExternalChangesAnnotation] = "" + kc.Annotations[models.ExternalChangesAnnotation] = "" - err := r.Patch(context.Background(), k, patch) + err := r.Patch(context.Background(), kc, patch) if err != nil { l.Error(err, "Cannot patch cluster resource", - "cluster name", k.Spec.Name, "cluster ID", k.Status.ID) + "cluster name", kc.Spec.Name, "cluster ID", kc.Status.ID) - r.EventRecorder.Eventf(k, models.Warning, models.PatchFailed, + r.EventRecorder.Eventf(kc, models.Warning, models.PatchFailed, "Cluster resource patch is failed. 
Reason: %v", err) return reconcile.Result{}, err } - l.Info("External changes have been reconciled", "resource ID", k.Status.ID) - r.EventRecorder.Event(k, models.Normal, models.ExternalChanges, "External changes have been reconciled") + l.Info("External changes have been reconciled", "resource ID", kc.Status.ID) + r.EventRecorder.Event(kc, models.Normal, models.ExternalChanges, "External changes have been reconciled") return models.ExitReconcile, nil } @@ -390,6 +475,23 @@ func (r *KafkaConnectReconciler) handleDeleteCluster(ctx context.Context, kc *v1 return models.ExitReconcile, nil } + + if kc.Spec.OnPremisesSpec != nil { + err = deleteOnPremResources(ctx, r.Client, kc.Status.ID, kc.Namespace) + if err != nil { + l.Error(err, "Cannot delete cluster on-premises resources", + "cluster ID", kc.Status.ID) + r.EventRecorder.Eventf(kc, models.Warning, models.DeletionFailed, + "Cluster on-premises resources deletion is failed. Reason: %v", err) + return reconcile.Result{}, err + } + + l.Info("Cluster on-premises resources are deleted", + "cluster ID", kc.Status.ID) + r.EventRecorder.Eventf(kc, models.Normal, models.Deleted, + "Cluster on-premises resources are deleted") + r.Scheduler.RemoveJob(kc.GetJobID(scheduler.OnPremisesIPsChecker)) + } } err = deleteDefaultUserSecret(ctx, r.Client, client.ObjectKeyFromObject(kc)) @@ -472,6 +574,17 @@ func (r *KafkaConnectReconciler) createDefaultSecret(ctx context.Context, kc *v1 return nil } +func (r *KafkaConnectReconciler) startClusterOnPremisesIPsJob(k *v1beta1.KafkaConnect, b *onPremisesBootstrap) error { + job := newWatchOnPremisesIPsJob(k.Kind, b) + + err := r.Scheduler.ScheduleJob(k.GetJobID(scheduler.OnPremisesIPsChecker), scheduler.ClusterStatusInterval, job) + if err != nil { + return err + } + + return nil +} + func (r *KafkaConnectReconciler) startClusterStatusJob(kc *v1beta1.KafkaConnect) error { job := r.newWatchStatusJob(kc) diff --git a/controllers/clusters/on_premises.go b/controllers/clusters/on_premises.go new file mode 100644 index 000000000..78ca937cc --- /dev/null +++ b/controllers/clusters/on_premises.go @@ -0,0 +1,861 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package clusters + +import ( + "context" + "fmt" + "strings" + + k8scorev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/tools/record" + virtcorev1 "kubevirt.io/api/core/v1" + cdiv1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/instaclustr/operator/apis/clusters/v1beta1" + "github.com/instaclustr/operator/pkg/models" + "github.com/instaclustr/operator/pkg/scheduler" +) + +type onPremisesBootstrap struct { + K8sClient client.Client + K8sObject client.Object + EventRecorder record.EventRecorder + ClusterStatus v1beta1.ClusterStatus + OnPremisesSpec *v1beta1.OnPremisesSpec + ExposePorts []k8scorev1.ServicePort + HeadlessPorts []k8scorev1.ServicePort + PrivateNetworkCluster bool +} + +func newOnPremisesBootstrap( + k8sClient client.Client, + o client.Object, + e record.EventRecorder, + status v1beta1.ClusterStatus, + onPremisesSpec *v1beta1.OnPremisesSpec, + exposePorts, + headlessPorts []k8scorev1.ServicePort, + privateNetworkCluster bool, +) *onPremisesBootstrap { + return &onPremisesBootstrap{ + K8sClient: k8sClient, + K8sObject: o, + EventRecorder: e, + ClusterStatus: status, + OnPremisesSpec: onPremisesSpec, + ExposePorts: exposePorts, + HeadlessPorts: headlessPorts, + PrivateNetworkCluster: privateNetworkCluster, + } +} + +func handleCreateOnPremisesClusterResources(ctx context.Context, b *onPremisesBootstrap) error { + if len(b.ClusterStatus.DataCentres) < 1 { + return fmt.Errorf("datacenter ID is empty") + } + + if b.PrivateNetworkCluster { + err := reconcileSSHGatewayResources(ctx, b) + if err != nil { + return err + } + } + + err := reconcileNodesResources(ctx, b) + if err != nil { + return err + } + + return nil +} + +func reconcileSSHGatewayResources(ctx context.Context, b *onPremisesBootstrap) error { + gatewayDVSize, err := resource.ParseQuantity(b.OnPremisesSpec.OSDiskSize) + if err != nil { + return err + } + + gatewayDVName := fmt.Sprintf("%s-%s", models.GatewayDVPrefix, strings.ToLower(b.K8sObject.GetName())) + gatewayDV, err := createDV( + ctx, + b, + gatewayDVName, + b.ClusterStatus.DataCentres[0].ID, + gatewayDVSize, + true, + ) + if err != nil { + return err + } + + gatewayCPU := resource.Quantity{} + gatewayCPU.Set(b.OnPremisesSpec.SSHGatewayCPU) + + gatewayMemory, err := resource.ParseQuantity(b.OnPremisesSpec.SSHGatewayMemory) + if err != nil { + return err + } + + gatewayName := fmt.Sprintf("%s-%s", models.GatewayVMPrefix, strings.ToLower(b.K8sObject.GetName())) + + gatewayVM := &virtcorev1.VirtualMachine{} + err = b.K8sClient.Get(ctx, types.NamespacedName{ + Namespace: b.K8sObject.GetNamespace(), + Name: gatewayName, + }, gatewayVM) + if client.IgnoreNotFound(err) != nil { + return err + } + if k8serrors.IsNotFound(err) { + gatewayVM, err = newVM( + ctx, + b, + gatewayName, + b.ClusterStatus.DataCentres[0].ID, + models.GatewayRack, + gatewayDV.Name, + gatewayCPU, + gatewayMemory) + if err != nil { + return err + } + err = b.K8sClient.Create(ctx, gatewayVM) + if err != nil { + return err + } + } + + gatewaySvcName := fmt.Sprintf("%s-%s", models.GatewaySvcPrefix, gatewayName) + gatewayExposeService := &k8scorev1.Service{} + err = 
b.K8sClient.Get(ctx, types.NamespacedName{ + Namespace: b.K8sObject.GetNamespace(), + Name: gatewaySvcName, + }, gatewayExposeService) + + if client.IgnoreNotFound(err) != nil { + return err + } + if k8serrors.IsNotFound(err) { + gatewayExposeService = newExposeService( + b, + gatewaySvcName, + gatewayName, + b.ClusterStatus.DataCentres[0].ID, + ) + err = b.K8sClient.Create(ctx, gatewayExposeService) + if err != nil { + return err + } + } + + return nil +} + +func reconcileNodesResources(ctx context.Context, b *onPremisesBootstrap) error { + for i, node := range b.ClusterStatus.DataCentres[0].Nodes { + nodeOSDiskSize, err := resource.ParseQuantity(b.OnPremisesSpec.OSDiskSize) + if err != nil { + return err + } + + nodeOSDiskDVName := fmt.Sprintf("%s-%d-%s", models.NodeOSDVPrefix, i, strings.ToLower(b.K8sObject.GetName())) + nodeOSDV, err := createDV( + ctx, + b, + nodeOSDiskDVName, + node.ID, + nodeOSDiskSize, + true, + ) + if err != nil { + return err + } + + nodeDataDiskDVSize, err := resource.ParseQuantity(b.OnPremisesSpec.DataDiskSize) + if err != nil { + return err + } + + nodeDataDiskDVName := fmt.Sprintf("%s-%d-%s", models.NodeDVPrefix, i, strings.ToLower(b.K8sObject.GetName())) + nodeDataDV, err := createDV( + ctx, + b, + nodeDataDiskDVName, + node.ID, + nodeDataDiskDVSize, + false, + ) + if err != nil { + return err + } + + nodeCPU := resource.Quantity{} + nodeCPU.Set(b.OnPremisesSpec.NodeCPU) + + nodeMemory, err := resource.ParseQuantity(b.OnPremisesSpec.NodeMemory) + if err != nil { + return err + } + + nodeName := fmt.Sprintf("%s-%d-%s", models.NodeVMPrefix, i, strings.ToLower(b.K8sObject.GetName())) + + nodeVM := &virtcorev1.VirtualMachine{} + err = b.K8sClient.Get(ctx, types.NamespacedName{ + Namespace: b.K8sObject.GetNamespace(), + Name: nodeName, + }, nodeVM) + if client.IgnoreNotFound(err) != nil { + return err + } + if k8serrors.IsNotFound(err) { + nodeVM, err = newVM( + ctx, + b, + nodeName, + node.ID, + node.Rack, + nodeOSDV.Name, + nodeCPU, + nodeMemory, + nodeDataDV.Name, + ) + if err != nil { + return err + } + err = b.K8sClient.Create(ctx, nodeVM) + if err != nil { + return err + } + } + + if !b.PrivateNetworkCluster { + nodeExposeName := fmt.Sprintf("%s-%s", models.NodeSvcPrefix, nodeName) + nodeExposeService := &k8scorev1.Service{} + err = b.K8sClient.Get(ctx, types.NamespacedName{ + Namespace: b.K8sObject.GetNamespace(), + Name: nodeExposeName, + }, nodeExposeService) + if client.IgnoreNotFound(err) != nil { + return err + } + if k8serrors.IsNotFound(err) { + nodeExposeService = newExposeService( + b, + nodeExposeName, + nodeName, + node.ID, + ) + err = b.K8sClient.Create(ctx, nodeExposeService) + if err != nil { + return err + } + } + } + + headlessServiceName := fmt.Sprintf("%s-%s", models.KubevirtSubdomain, strings.ToLower(b.K8sObject.GetName())) + headlessSVC := &k8scorev1.Service{} + err = b.K8sClient.Get(ctx, types.NamespacedName{ + Namespace: b.K8sObject.GetNamespace(), + Name: headlessServiceName, + }, headlessSVC) + + if client.IgnoreNotFound(err) != nil { + return err + } + if k8serrors.IsNotFound(err) { + headlessSVC = newHeadlessService( + b, + headlessServiceName, + ) + err = b.K8sClient.Create(ctx, headlessSVC) + if err != nil { + return err + } + } + + } + return nil +} + +func createDV( + ctx context.Context, + b *onPremisesBootstrap, + name, + nodeID string, + size resource.Quantity, + isOSDisk bool, +) (*cdiv1beta1.DataVolume, error) { + dv := &cdiv1beta1.DataVolume{} + err := b.K8sClient.Get(ctx, types.NamespacedName{ + Namespace: 
b.K8sObject.GetNamespace(), + Name: name, + }, dv) + if client.IgnoreNotFound(err) != nil { + return nil, err + } + if k8serrors.IsNotFound(err) { + dv = newDataDiskDV( + b, + name, + nodeID, + size, + isOSDisk, + ) + err = b.K8sClient.Create(ctx, dv) + if err != nil { + return nil, err + } + } + return dv, nil +} + +func newDataDiskDV( + b *onPremisesBootstrap, + name, + nodeID string, + storageSize resource.Quantity, + isOSDisk bool, +) *cdiv1beta1.DataVolume { + dvSource := &cdiv1beta1.DataVolumeSource{} + + if isOSDisk { + dvSource.HTTP = &cdiv1beta1.DataVolumeSourceHTTP{URL: b.OnPremisesSpec.OSImageURL} + } else { + dvSource.Blank = &cdiv1beta1.DataVolumeBlankImage{} + } + + return &cdiv1beta1.DataVolume{ + TypeMeta: metav1.TypeMeta{ + Kind: models.DVKind, + APIVersion: models.CDIKubevirtV1beta1APIVersion, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: b.K8sObject.GetNamespace(), + Labels: map[string]string{ + models.ClusterIDLabel: b.ClusterStatus.ID, + models.NodeIDLabel: nodeID, + }, + Finalizers: []string{models.DeletionFinalizer}, + }, + Spec: cdiv1beta1.DataVolumeSpec{ + Source: dvSource, + PVC: &k8scorev1.PersistentVolumeClaimSpec{ + AccessModes: []k8scorev1.PersistentVolumeAccessMode{ + k8scorev1.ReadWriteOnce, + }, + Resources: k8scorev1.ResourceRequirements{ + Requests: k8scorev1.ResourceList{ + models.Storage: storageSize, + }, + }, + StorageClassName: &b.OnPremisesSpec.StorageClassName, + }, + }, + } +} + +func newVM( + ctx context.Context, + b *onPremisesBootstrap, + vmName, + nodeID, + nodeRack, + OSDiskDVName string, + cpu, + memory resource.Quantity, + storageDVNames ...string, +) (*virtcorev1.VirtualMachine, error) { + runStrategy := virtcorev1.RunStrategyAlways + bootOrder1 := uint(1) + + cloudInitSecret := &k8scorev1.Secret{} + err := b.K8sClient.Get(ctx, types.NamespacedName{ + Namespace: b.OnPremisesSpec.CloudInitScriptRef.Namespace, + Name: b.OnPremisesSpec.CloudInitScriptRef.Name, + }, cloudInitSecret) + if err != nil { + return nil, err + } + + labelSet := map[string]string{ + models.ClusterIDLabel: b.ClusterStatus.ID, + models.NodeIDLabel: nodeID, + models.NodeRackLabel: nodeRack, + models.KubevirtDomainLabel: vmName, + } + + if nodeRack != models.GatewayRack { + labelSet[models.NodeLabel] = models.WorkerNode + } + + vm := &virtcorev1.VirtualMachine{ + TypeMeta: metav1.TypeMeta{ + Kind: models.VirtualMachineKind, + APIVersion: models.KubevirtV1APIVersion, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: vmName, + Namespace: b.K8sObject.GetNamespace(), + Labels: labelSet, + Finalizers: []string{models.DeletionFinalizer}, + }, + Spec: virtcorev1.VirtualMachineSpec{ + RunStrategy: &runStrategy, + Template: &virtcorev1.VirtualMachineInstanceTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labelSet, + }, + Spec: virtcorev1.VirtualMachineInstanceSpec{ + Hostname: vmName, + Subdomain: fmt.Sprintf("%s-%s", models.KubevirtSubdomain, b.K8sObject.GetName()), + Domain: virtcorev1.DomainSpec{ + Resources: virtcorev1.ResourceRequirements{ + Requests: k8scorev1.ResourceList{ + models.CPU: cpu, + models.Memory: memory, + }, + }, + Devices: virtcorev1.Devices{ + Disks: []virtcorev1.Disk{ + { + Name: models.Boot, + BootOrder: &bootOrder1, + IO: models.Native, + Cache: models.None, + DiskDevice: virtcorev1.DiskDevice{ + Disk: &virtcorev1.DiskTarget{ + Bus: models.Virtio, + }, + }, + }, + { + Name: models.CloudInit, + DiskDevice: virtcorev1.DiskDevice{}, + Cache: models.None, + }, + }, + Interfaces: []virtcorev1.Interface{ + { + Name: models.Default, + 
InterfaceBindingMethod: virtcorev1.InterfaceBindingMethod{ + Bridge: &virtcorev1.InterfaceBridge{}, + }, + }, + }, + }, + }, + Volumes: []virtcorev1.Volume{ + { + Name: models.Boot, + VolumeSource: virtcorev1.VolumeSource{ + PersistentVolumeClaim: &virtcorev1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: k8scorev1.PersistentVolumeClaimVolumeSource{ + ClaimName: OSDiskDVName, + }, + }, + }, + }, + { + Name: models.CloudInit, + VolumeSource: virtcorev1.VolumeSource{ + CloudInitNoCloud: &virtcorev1.CloudInitNoCloudSource{ + UserDataSecretRef: &k8scorev1.LocalObjectReference{ + Name: b.OnPremisesSpec.CloudInitScriptRef.Name, + }, + }, + }, + }, + }, + Networks: []virtcorev1.Network{ + { + Name: models.Default, + NetworkSource: virtcorev1.NetworkSource{ + Pod: &virtcorev1.PodNetwork{}, + }, + }, + }, + }, + }, + }, + } + + for i, dvName := range storageDVNames { + diskName := fmt.Sprintf("%s-%d-%s", models.DataDisk, i, vm.Name) + + vm.Spec.Template.Spec.Domain.Devices.Disks = append(vm.Spec.Template.Spec.Domain.Devices.Disks, virtcorev1.Disk{ + Name: diskName, + IO: models.Native, + Cache: models.None, + DiskDevice: virtcorev1.DiskDevice{ + Disk: &virtcorev1.DiskTarget{ + Bus: models.Virtio, + }, + }, + Serial: models.DataDiskSerial, + }) + + vm.Spec.Template.Spec.Volumes = append(vm.Spec.Template.Spec.Volumes, virtcorev1.Volume{ + Name: diskName, + VolumeSource: virtcorev1.VolumeSource{ + PersistentVolumeClaim: &virtcorev1.PersistentVolumeClaimVolumeSource{ + PersistentVolumeClaimVolumeSource: k8scorev1.PersistentVolumeClaimVolumeSource{ + ClaimName: dvName, + }, + }, + }, + }) + } + + return vm, nil +} + +func newExposeService( + b *onPremisesBootstrap, + svcName, + vmName, + nodeID string, +) *k8scorev1.Service { + return &k8scorev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: models.ServiceKind, + APIVersion: models.K8sAPIVersionV1, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: svcName, + Namespace: b.K8sObject.GetNamespace(), + Labels: map[string]string{ + models.ClusterIDLabel: b.ClusterStatus.ID, + models.NodeIDLabel: nodeID, + }, + Finalizers: []string{models.DeletionFinalizer}, + }, + Spec: k8scorev1.ServiceSpec{ + Ports: b.ExposePorts, + Selector: map[string]string{ + models.KubevirtDomainLabel: vmName, + models.NodeIDLabel: nodeID, + }, + Type: models.LBType, + }, + } +} + +func newHeadlessService( + b *onPremisesBootstrap, + svcName string, +) *k8scorev1.Service { + return &k8scorev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: models.ServiceKind, + APIVersion: models.K8sAPIVersionV1, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: svcName, + Namespace: b.K8sObject.GetNamespace(), + Labels: map[string]string{ + models.ClusterIDLabel: b.ClusterStatus.ID, + }, + Finalizers: []string{models.DeletionFinalizer}, + }, + Spec: k8scorev1.ServiceSpec{ + ClusterIP: "None", + Ports: b.HeadlessPorts, + Selector: map[string]string{ + models.ClusterIDLabel: b.ClusterStatus.ID, + models.NodeLabel: models.WorkerNode, + }, + }, + } +} + +func deleteOnPremResources(ctx context.Context, K8sClient client.Client, clusterID, ns string) error { + vms := &virtcorev1.VirtualMachineList{} + err := K8sClient.List(ctx, vms, &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{ + models.ClusterIDLabel: clusterID, + }), + Namespace: ns, + }) + if err != nil { + return err + } + + for _, vm := range vms.Items { + err = K8sClient.Delete(ctx, &vm) + if err != nil { + return err + } + + patch := client.MergeFrom(vm.DeepCopy()) + controllerutil.RemoveFinalizer(&vm, 
models.DeletionFinalizer) + err = K8sClient.Patch(ctx, &vm, patch) + if err != nil { + return err + } + } + + vmis := &virtcorev1.VirtualMachineInstanceList{} + err = K8sClient.List(ctx, vmis, &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{ + models.ClusterIDLabel: clusterID, + }), + Namespace: ns, + }) + if err != nil { + return err + } + + for _, vmi := range vmis.Items { + err = K8sClient.Delete(ctx, &vmi) + if err != nil { + return err + } + + patch := client.MergeFrom(vmi.DeepCopy()) + controllerutil.RemoveFinalizer(&vmi, models.DeletionFinalizer) + err = K8sClient.Patch(ctx, &vmi, patch) + if err != nil { + return err + } + } + + dvs := &cdiv1beta1.DataVolumeList{} + err = K8sClient.List(ctx, dvs, &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{ + models.ClusterIDLabel: clusterID, + }), + Namespace: ns, + }) + if err != nil { + return err + } + + for _, dv := range dvs.Items { + err = K8sClient.Delete(ctx, &dv) + if err != nil { + return err + } + + patch := client.MergeFrom(dv.DeepCopy()) + controllerutil.RemoveFinalizer(&dv, models.DeletionFinalizer) + err = K8sClient.Patch(ctx, &dv, patch) + if err != nil { + return err + } + } + + svcs := &k8scorev1.ServiceList{} + err = K8sClient.List(ctx, svcs, &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{ + models.ClusterIDLabel: clusterID, + }), + Namespace: ns, + }) + if err != nil { + return err + } + + for _, svc := range svcs.Items { + err = K8sClient.Delete(ctx, &svc) + if err != nil { + return err + } + + patch := client.MergeFrom(svc.DeepCopy()) + controllerutil.RemoveFinalizer(&svc, models.DeletionFinalizer) + err = K8sClient.Patch(ctx, &svc, patch) + if err != nil { + return err + } + } + + return nil +} + +func newExposePorts(sp []k8scorev1.ServicePort) []k8scorev1.ServicePort { + var ports []k8scorev1.ServicePort + ports = []k8scorev1.ServicePort{{ + Name: models.SSH, + Port: models.Port22, + TargetPort: intstr.IntOrString{ + Type: intstr.Int, + IntVal: models.Port22, + }, + }, + } + + ports = append(ports, sp...) + + return ports +} + +func newWatchOnPremisesIPsJob(kind string, b *onPremisesBootstrap) scheduler.Job { + l := log.Log.WithValues("component", fmt.Sprintf("%sOnPremisesIPsCheckerJob", kind)) + + return func() error { + allNodePods := &k8scorev1.PodList{} + err := b.K8sClient.List(context.Background(), allNodePods, &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{ + models.ClusterIDLabel: b.ClusterStatus.ID, + models.NodeLabel: models.WorkerNode, + }), + Namespace: b.K8sObject.GetNamespace(), + }) + if err != nil { + l.Error(err, "Cannot get on-premises cluster pods", + "cluster name", b.K8sObject.GetName(), + "clusterID", b.ClusterStatus.ID, + ) + + b.EventRecorder.Eventf( + b.K8sObject, models.Warning, models.CreationFailed, + "Fetching on-premises cluster pods is failed. Reason: %v", + err, + ) + return err + } + + l.Info("allNodePODS", "allNodePODS", allNodePods.Items) + l.Info("Nodes", "Nodes", b.ClusterStatus.DataCentres[0].Nodes) + + if len(allNodePods.Items) != len(b.ClusterStatus.DataCentres[0].Nodes) { + err = fmt.Errorf("the quantity of pods does not match the number of on-premises nodes") + l.Error(err, "Cannot compare private IPs for the cluster", + "cluster name", b.K8sObject.GetName(), + "clusterID", b.ClusterStatus.ID, + ) + + b.EventRecorder.Eventf( + b.K8sObject, models.Warning, models.CreationFailed, + "Comparing of cluster's private IPs is failing. 
Reason: %v", + err, + ) + return err + } + + for _, node := range b.ClusterStatus.DataCentres[0].Nodes { + nodePods := &k8scorev1.PodList{} + err = b.K8sClient.List(context.Background(), nodePods, &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{ + models.ClusterIDLabel: b.ClusterStatus.ID, + models.NodeIDLabel: node.ID, + }), + Namespace: b.K8sObject.GetNamespace(), + }) + if err != nil { + l.Error(err, "Cannot get on-premises cluster pods", + "cluster name", b.K8sObject.GetName(), + "clusterID", b.ClusterStatus.ID, + ) + + b.EventRecorder.Eventf( + b.K8sObject, models.Warning, models.CreationFailed, + "Fetching on-premises cluster pods is failed. Reason: %v", + err, + ) + return err + } + + l.Info("nodePODS", "nodePODS", nodePods.Items) + + for _, pod := range nodePods.Items { + l.Info("podIP", "podIP", pod.Status.PodIP) + l.Info("node.PrivateAddress", "node.PrivateAddress", node.PrivateAddress) + + if (pod.Status.PodIP != "" && node.PrivateAddress != "") && + (pod.Status.PodIP != node.PrivateAddress) { + + err = fmt.Errorf("private IPs was changed") + l.Error(err, "Node's private IP addresses are not equal", + "cluster name", b.K8sObject.GetName(), + "clusterID", b.ClusterStatus.ID, + "nodeID", node.ID, + "nodeIP", node.PrivateAddress, + "podIP", pod.Status.PodIP, + ) + + b.EventRecorder.Eventf( + b.K8sObject, models.Warning, models.CreationFailed, + "The private IP addresses of the node are not matching. Reason: %v", + err, + ) + return err + } + } + + if !b.PrivateNetworkCluster { + nodeSVCs := &k8scorev1.ServiceList{} + err = b.K8sClient.List(context.Background(), nodeSVCs, &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{ + models.ClusterIDLabel: b.ClusterStatus.ID, + models.NodeIDLabel: node.ID, + }), + Namespace: b.K8sObject.GetNamespace(), + }) + if err != nil { + l.Error(err, "Cannot get services backed by on-premises cluster pods", + "cluster name", b.K8sObject.GetName(), + "clusterID", b.ClusterStatus.ID, + ) + + b.EventRecorder.Eventf( + b.K8sObject, models.Warning, models.CreationFailed, + "Fetching services backed by on-premises cluster pods is failed. Reason: %v", + err, + ) + return err + } + for _, svc := range nodeSVCs.Items { + l.Info("svcIP", "svcIP", svc.Status.LoadBalancer.Ingress[0].IP) + l.Info("node.PublicAddress", "node.PublicAddress", node.PublicAddress) + + if (svc.Status.LoadBalancer.Ingress[0].IP != "" && node.PublicAddress != "") && + (svc.Status.LoadBalancer.Ingress[0].IP != node.PublicAddress) { + + err = fmt.Errorf("public IPs was changed") + l.Error(err, "Node's public IP addresses are not equal", + "cluster name", b.K8sObject.GetName(), + "clusterID", b.ClusterStatus.ID, + "nodeID", node.ID, + "nodeIP", node.PrivateAddress, + "svcIP", svc.Status.LoadBalancer.Ingress[0].IP, + ) + + b.EventRecorder.Eventf( + b.K8sObject, models.Warning, models.CreationFailed, + "The public IP addresses of the node are not matching. 
Reason: %v", + err, + ) + return err + } + } + } + } + return nil + } +} diff --git a/controllers/clusters/opensearch_controller.go b/controllers/clusters/opensearch_controller.go index b34807a30..516510897 100644 --- a/controllers/clusters/opensearch_controller.go +++ b/controllers/clusters/opensearch_controller.go @@ -276,7 +276,7 @@ func (r *OpenSearchReconciler) HandleUpdateCluster( "cluster ID", o.Status.ID, ) - r.EventRecorder.Eventf(o, models.Warning, models.ConvertionFailed, + r.EventRecorder.Eventf(o, models.Warning, models.ConversionFailed, "Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v", err) return reconcile.Result{}, err diff --git a/controllers/clusters/postgresql_controller.go b/controllers/clusters/postgresql_controller.go index 046f3f383..39c04d91a 100644 --- a/controllers/clusters/postgresql_controller.go +++ b/controllers/clusters/postgresql_controller.go @@ -62,9 +62,16 @@ type PostgreSQLReconciler struct { //+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=postgresqls/status,verbs=get;update;patch //+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=postgresqls/finalizers,verbs=update //+kubebuilder:rbac:groups=clusterresources.instaclustr.com,resources=clusterbackups,verbs=get;list;create;update;patch;deletecollection;delete -//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;watch;create;delete;update //+kubebuilder:rbac:groups="",resources=events,verbs=create;patch //+kubebuilder:rbac:groups="",resources=nodes,verbs=get;watch;list +//+kubebuilder:rbac:groups=cdi.kubevirt.io,resources=datavolumes,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachines,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachineinstances,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete;deletecollection // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. 
@@ -72,19 +79,19 @@ type PostgreSQLReconciler struct { // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.0/pkg/reconcile func (r *PostgreSQLReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - logger := log.FromContext(ctx) + l := log.FromContext(ctx) pg := &v1beta1.PostgreSQL{} err := r.Client.Get(ctx, req.NamespacedName, pg) if err != nil { if k8serrors.IsNotFound(err) { - logger.Info("PostgreSQL custom resource is not found", + l.Info("PostgreSQL custom resource is not found", "resource name", req.NamespacedName, ) return models.ExitReconcile, nil } - logger.Error(err, "Unable to fetch PostgreSQL cluster", + l.Error(err, "Unable to fetch PostgreSQL cluster", "resource name", req.NamespacedName, ) @@ -93,22 +100,22 @@ func (r *PostgreSQLReconciler) Reconcile(ctx context.Context, req ctrl.Request) switch pg.Annotations[models.ResourceStateAnnotation] { case models.CreatingEvent: - return r.handleCreateCluster(ctx, pg, logger) + return r.handleCreateCluster(ctx, pg, l) case models.UpdatingEvent: - return r.handleUpdateCluster(ctx, pg, logger) + return r.handleUpdateCluster(ctx, pg, l) case models.DeletingEvent: - return r.handleDeleteCluster(ctx, pg, logger) + return r.handleDeleteCluster(ctx, pg, l) case models.SecretEvent: - return r.handleUpdateDefaultUserPassword(ctx, pg, logger) + return r.handleUpdateDefaultUserPassword(ctx, pg, l) case models.GenericEvent: - logger.Info("PostgreSQL resource generic event isn't handled", + l.Info("PostgreSQL resource generic event isn't handled", "cluster name", pg.Spec.Name, "request", req, "event", pg.Annotations[models.ResourceStateAnnotation], ) return models.ExitReconcile, nil default: - logger.Info("PostgreSQL resource event isn't handled", + l.Info("PostgreSQL resource event isn't handled", "cluster name", pg.Spec.Name, "request", req, "event", pg.Annotations[models.ResourceStateAnnotation], @@ -120,23 +127,23 @@ func (r *PostgreSQLReconciler) Reconcile(ctx context.Context, req ctrl.Request) func (r *PostgreSQLReconciler) handleCreateCluster( ctx context.Context, pg *v1beta1.PostgreSQL, - logger logr.Logger, + l logr.Logger, ) (reconcile.Result, error) { - logger = logger.WithName("PostgreSQL creation event") + l = l.WithName("PostgreSQL creation event") var err error patch := pg.NewPatch() if pg.Status.ID == "" { if pg.Spec.HasRestore() { - logger.Info( + l.Info( "Creating PostgreSQL cluster from backup", "original cluster ID", pg.Spec.PgRestoreFrom.ClusterID, ) pg.Status.ID, err = r.API.RestoreCluster(pg.RestoreInfoToInstAPI(pg.Spec.PgRestoreFrom), models.PgRestoreValue) if err != nil { - logger.Error(err, "Cannot restore PostgreSQL cluster from backup", + l.Error(err, "Cannot restore PostgreSQL cluster from backup", "original cluster ID", pg.Spec.PgRestoreFrom.ClusterID, ) @@ -156,7 +163,7 @@ func (r *PostgreSQLReconciler) handleCreateCluster( pg.Status.ID, ) } else { - logger.Info( + l.Info( "Creating PostgreSQL cluster", "cluster name", pg.Spec.Name, "data centres", pg.Spec.DataCentres, @@ -166,7 +173,7 @@ func (r *PostgreSQLReconciler) handleCreateCluster( pg.Status.ID, err = r.API.CreateCluster(instaclustr.PGSQLEndpoint, pgSpec) if err != nil { - logger.Error( + l.Error( err, "Cannot create PostgreSQL cluster", "spec", pg.Spec, ) @@ -189,7 +196,7 @@ func (r *PostgreSQLReconciler) handleCreateCluster( err = r.Status().Patch(ctx, pg, patch) if err != nil { - logger.Error(err, "Cannot patch PostgreSQL resource status", + l.Error(err, 
"Cannot patch PostgreSQL resource status", "cluster name", pg.Spec.Name, "status", pg.Status, ) @@ -203,7 +210,7 @@ func (r *PostgreSQLReconciler) handleCreateCluster( return reconcile.Result{}, err } - logger.Info( + l.Info( "PostgreSQL resource has been created", "cluster name", pg.Name, "cluster ID", pg.Status.ID, @@ -218,7 +225,7 @@ func (r *PostgreSQLReconciler) handleCreateCluster( pg.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent err = r.Patch(ctx, pg, patch) if err != nil { - logger.Error(err, "Cannot patch PostgreSQL resource", + l.Error(err, "Cannot patch PostgreSQL resource", "cluster name", pg.Spec.Name, "status", pg.Status) @@ -232,7 +239,7 @@ func (r *PostgreSQLReconciler) handleCreateCluster( if pg.Status.State != models.DeletedStatus { err = r.startClusterStatusJob(pg) if err != nil { - logger.Error(err, "Cannot start PostgreSQL cluster status check job", + l.Error(err, "Cannot start PostgreSQL cluster status check job", "cluster ID", pg.Status.ID, ) @@ -249,9 +256,87 @@ func (r *PostgreSQLReconciler) handleCreateCluster( "Cluster status check job is started", ) + if pg.Spec.OnPremisesSpec != nil { + iData, err := r.API.GetPostgreSQL(pg.Status.ID) + if err != nil { + l.Error(err, "Cannot get cluster from the Instaclustr API", + "cluster name", pg.Spec.Name, + "data centres", pg.Spec.DataCentres, + "cluster ID", pg.Status.ID, + ) + r.EventRecorder.Eventf( + pg, models.Warning, models.FetchFailed, + "Cluster fetch from the Instaclustr API is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + iPostgreSQL, err := pg.FromInstAPI(iData) + if err != nil { + l.Error( + err, "Cannot convert cluster from the Instaclustr API", + "cluster name", pg.Spec.Name, + "cluster ID", pg.Status.ID, + ) + r.EventRecorder.Eventf( + pg, models.Warning, models.ConversionFailed, + "Cluster convertion from the Instaclustr API to k8s resource is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + bootstrap := newOnPremisesBootstrap( + r.Client, + pg, + r.EventRecorder, + iPostgreSQL.Status.ClusterStatus, + pg.Spec.OnPremisesSpec, + newExposePorts(pg.GetExposePorts()), + pg.GetHeadlessPorts(), + pg.Spec.PrivateNetworkCluster, + ) + + err = handleCreateOnPremisesClusterResources(ctx, bootstrap) + if err != nil { + l.Error( + err, "Cannot create resources for on-premises cluster", + "cluster spec", pg.Spec.OnPremisesSpec, + ) + r.EventRecorder.Eventf( + pg, models.Warning, models.CreationFailed, + "Resources creation for on-premises cluster is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + err = r.startClusterOnPremisesIPsJob(pg, bootstrap) + if err != nil { + l.Error(err, "Cannot start on-premises cluster IPs check job", + "cluster ID", pg.Status.ID, + ) + + r.EventRecorder.Eventf( + pg, models.Warning, models.CreationFailed, + "On-premises cluster IPs check job is failed. 
Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + l.Info( + "On-premises resources have been created", + "cluster name", pg.Spec.Name, + "on-premises Spec", pg.Spec.OnPremisesSpec, + "cluster ID", pg.Status.ID, + ) + return models.ExitReconcile, nil + } + err = r.startClusterBackupsJob(pg) if err != nil { - logger.Error(err, "Cannot start PostgreSQL cluster backups check job", + l.Error(err, "Cannot start PostgreSQL cluster backups check job", "cluster ID", pg.Status.ID, ) @@ -271,7 +356,7 @@ func (r *PostgreSQLReconciler) handleCreateCluster( if pg.Spec.UserRefs != nil { err = r.startUsersCreationJob(pg) if err != nil { - logger.Error(err, "Failed to start user PostreSQL creation job") + l.Error(err, "Failed to start user PostreSQL creation job") r.EventRecorder.Eventf(pg, models.Warning, models.CreationFailed, "User creation job is failed. Reason: %v", err) return reconcile.Result{}, err @@ -282,9 +367,9 @@ func (r *PostgreSQLReconciler) handleCreateCluster( } } - err = r.createDefaultPassword(ctx, pg, logger) + err = r.createDefaultPassword(ctx, pg, l) if err != nil { - logger.Error(err, "Cannot create default password for PostgreSQL", + l.Error(err, "Cannot create default password for PostgreSQL", "cluster name", pg.Spec.Name, "clusterID", pg.Status.ID, ) @@ -304,13 +389,13 @@ func (r *PostgreSQLReconciler) handleCreateCluster( func (r *PostgreSQLReconciler) handleUpdateCluster( ctx context.Context, pg *v1beta1.PostgreSQL, - logger logr.Logger, + l logr.Logger, ) (reconcile.Result, error) { - logger = logger.WithName("PostgreSQL update event") + l = l.WithName("PostgreSQL update event") iData, err := r.API.GetPostgreSQL(pg.Status.ID) if err != nil { - logger.Error( + l.Error( err, "Cannot get PostgreSQL cluster status from the Instaclustr API", "cluster name", pg.Spec.Name, "cluster ID", pg.Status.ID, @@ -326,14 +411,14 @@ func (r *PostgreSQLReconciler) handleUpdateCluster( iPg, err := pg.FromInstAPI(iData) if err != nil { - logger.Error( + l.Error( err, "Cannot convert PostgreSQL cluster status from the Instaclustr API", "cluster name", pg.Spec.Name, "cluster ID", pg.Status.ID, ) r.EventRecorder.Eventf( - pg, models.Warning, models.ConvertionFailed, + pg, models.Warning, models.ConversionFailed, "Cluster convertion from the Instaclustr API to k8s resource is failed. 
Reason: %v", err, ) @@ -341,7 +426,7 @@ func (r *PostgreSQLReconciler) handleUpdateCluster( } if iPg.Status.CurrentClusterOperationStatus != models.NoOperation { - logger.Info("PostgreSQL cluster is not ready to update", + l.Info("PostgreSQL cluster is not ready to update", "cluster name", pg.Spec.Name, "cluster status", iPg.Status.State, "current operation status", iPg.Status.CurrentClusterOperationStatus, @@ -350,7 +435,7 @@ func (r *PostgreSQLReconciler) handleUpdateCluster( pg.Annotations[models.UpdateQueuedAnnotation] = models.True err = r.Patch(ctx, pg, patch) if err != nil { - logger.Error(err, "Cannot patch cluster resource", + l.Error(err, "Cannot patch cluster resource", "cluster name", pg.Spec.Name, "cluster ID", pg.Status.ID) r.EventRecorder.Eventf( @@ -364,17 +449,17 @@ func (r *PostgreSQLReconciler) handleUpdateCluster( } if pg.Annotations[models.ExternalChangesAnnotation] == models.True { - return r.handleExternalChanges(pg, iPg, logger) + return r.handleExternalChanges(pg, iPg, l) } if pg.Spec.ClusterSettingsNeedUpdate(iPg.Spec.Cluster) { - logger.Info("Updating cluster settings", + l.Info("Updating cluster settings", "instaclustr description", iPg.Spec.Description, "instaclustr two factor delete", iPg.Spec.TwoFactorDelete) err = r.API.UpdateClusterSettings(pg.Status.ID, pg.Spec.ClusterSettingsUpdateToInstAPI()) if err != nil { - logger.Error(err, "Cannot update cluster settings", + l.Error(err, "Cannot update cluster settings", "cluster ID", pg.Status.ID, "cluster spec", pg.Spec) r.EventRecorder.Eventf(pg, models.Warning, models.UpdateFailed, "Cannot update cluster settings. Reason: %v", err) @@ -384,14 +469,14 @@ func (r *PostgreSQLReconciler) handleUpdateCluster( } if !pg.Spec.AreDCsEqual(iPg.Spec.DataCentres) { - logger.Info("Update request to Instaclustr API has been sent", + l.Info("Update request to Instaclustr API has been sent", "spec data centres", pg.Spec.DataCentres, "resize settings", pg.Spec.ResizeSettings, ) err = r.updateCluster(pg) if err != nil { - logger.Error(err, "Cannot update Data Centres", + l.Error(err, "Cannot update Data Centres", "cluster name", pg.Spec.Name, ) @@ -405,7 +490,7 @@ func (r *PostgreSQLReconciler) handleUpdateCluster( pg.Annotations[models.UpdateQueuedAnnotation] = models.True err = r.Patch(ctx, pg, patch) if err != nil { - logger.Error(err, "Cannot patch PostgreSQL metadata", + l.Error(err, "Cannot patch PostgreSQL metadata", "cluster name", pg.Spec.Name, "cluster metadata", pg.ObjectMeta, ) @@ -420,7 +505,7 @@ func (r *PostgreSQLReconciler) handleUpdateCluster( return reconcile.Result{}, err } - logger.Info( + l.Info( "Cluster has been updated", "cluster name", pg.Spec.Name, "cluster ID", pg.Status.ID, @@ -430,7 +515,7 @@ func (r *PostgreSQLReconciler) handleUpdateCluster( iConfigs, err := r.API.GetPostgreSQLConfigs(pg.Status.ID) if err != nil { - logger.Error(err, "Cannot get PostgreSQL cluster configs", + l.Error(err, "Cannot get PostgreSQL cluster configs", "cluster name", pg.Spec.Name, "clusterID", pg.Status.ID, ) @@ -449,7 +534,7 @@ func (r *PostgreSQLReconciler) handleUpdateCluster( pg.Spec.ClusterConfigurations, iConfig.ConfigurationProperties) if err != nil { - logger.Error(err, "Cannot reconcile PostgreSQL cluster configs", + l.Error(err, "Cannot reconcile PostgreSQL cluster configs", "cluster name", pg.Spec.Name, "clusterID", pg.Status.ID, "configs", pg.Spec.ClusterConfigurations, @@ -464,16 +549,16 @@ func (r *PostgreSQLReconciler) handleUpdateCluster( return reconcile.Result{}, err } - logger.Info("PostgreSQL cluster 
configurations were updated", + l.Info("PostgreSQL cluster configurations were updated", "cluster name", pg.Spec.Name, ) } pg.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent pg.Annotations[models.UpdateQueuedAnnotation] = "" - err = r.patchClusterMetadata(ctx, pg, logger) + err = r.patchClusterMetadata(ctx, pg, l) if err != nil { - logger.Error(err, "Cannot patch PostgreSQL resource metadata", + l.Error(err, "Cannot patch PostgreSQL resource metadata", "cluster name", pg.Spec.Name, "cluster metadata", pg.ObjectMeta, ) @@ -486,7 +571,7 @@ func (r *PostgreSQLReconciler) handleUpdateCluster( return reconcile.Result{}, err } - logger.Info("PostgreSQL cluster was updated", + l.Info("PostgreSQL cluster was updated", "cluster name", pg.Spec.Name, "cluster status", pg.Status.State, ) @@ -790,13 +875,13 @@ func (r *PostgreSQLReconciler) handleExternalChanges(pg, iPg *v1beta1.PostgreSQL func (r *PostgreSQLReconciler) handleDeleteCluster( ctx context.Context, pg *v1beta1.PostgreSQL, - logger logr.Logger, + l logr.Logger, ) (reconcile.Result, error) { - logger = logger.WithName("PostgreSQL deletion event") + l = l.WithName("PostgreSQL deletion event") _, err := r.API.GetPostgreSQL(pg.Status.ID) if err != nil && !errors.Is(err, instaclustr.NotFound) { - logger.Error(err, "Cannot get PostgreSQL cluster status", + l.Error(err, "Cannot get PostgreSQL cluster status", "cluster name", pg.Spec.Name, "cluster ID", pg.Status.ID, ) @@ -810,13 +895,13 @@ func (r *PostgreSQLReconciler) handleDeleteCluster( } if !errors.Is(err, instaclustr.NotFound) { - logger.Info("Sending cluster deletion to the Instaclustr API", + l.Info("Sending cluster deletion to the Instaclustr API", "cluster name", pg.Spec.Name, "cluster ID", pg.Status.ID) err = r.API.DeleteCluster(pg.Status.ID, instaclustr.PGSQLEndpoint) if err != nil { - logger.Error(err, "Cannot delete PostgreSQL cluster", + l.Error(err, "Cannot delete PostgreSQL cluster", "cluster name", pg.Spec.Name, "cluster status", pg.Status.State, ) @@ -839,7 +924,7 @@ func (r *PostgreSQLReconciler) handleDeleteCluster( pg.Annotations[models.ClusterDeletionAnnotation] = models.Triggered err = r.Patch(ctx, pg, patch) if err != nil { - logger.Error(err, "Cannot patch cluster resource", + l.Error(err, "Cannot patch cluster resource", "cluster name", pg.Spec.Name, "cluster state", pg.Status.State) r.EventRecorder.Eventf(pg, models.Warning, models.PatchFailed, @@ -849,26 +934,43 @@ func (r *PostgreSQLReconciler) handleDeleteCluster( return reconcile.Result{}, err } - logger.Info(msgDeleteClusterWithTwoFactorDelete, "cluster ID", pg.Status.ID) + l.Info(msgDeleteClusterWithTwoFactorDelete, "cluster ID", pg.Status.ID) r.EventRecorder.Event(pg, models.Normal, models.DeletionStarted, "Two-Factor Delete is enabled, please confirm cluster deletion via email or phone.") return models.ExitReconcile, nil } + + if pg.Spec.OnPremisesSpec != nil { + err = deleteOnPremResources(ctx, r.Client, pg.Status.ID, pg.Namespace) + if err != nil { + l.Error(err, "Cannot delete cluster on-premises resources", + "cluster ID", pg.Status.ID) + r.EventRecorder.Eventf(pg, models.Warning, models.DeletionFailed, + "Cluster on-premises resources deletion is failed. 
Reason: %v", err) + return reconcile.Result{}, err + } + + l.Info("Cluster on-premises resources are deleted", + "cluster ID", pg.Status.ID) + r.EventRecorder.Eventf(pg, models.Normal, models.Deleted, + "Cluster on-premises resources are deleted") + r.Scheduler.RemoveJob(pg.GetJobID(scheduler.OnPremisesIPsChecker)) + } } - logger.Info("PostgreSQL cluster is being deleted. Deleting PostgreSQL default user secret", + l.Info("PostgreSQL cluster is being deleted. Deleting PostgreSQL default user secret", "cluster ID", pg.Status.ID, ) - logger.Info("Deleting cluster backup resources", + l.Info("Deleting cluster backup resources", "cluster ID", pg.Status.ID, ) err = r.deleteBackups(ctx, pg.Status.ID, pg.Namespace) if err != nil { - logger.Error(err, "Cannot delete PostgreSQL backup resources", + l.Error(err, "Cannot delete PostgreSQL backup resources", "cluster ID", pg.Status.ID, ) r.EventRecorder.Eventf( @@ -879,7 +981,7 @@ func (r *PostgreSQLReconciler) handleDeleteCluster( return reconcile.Result{}, err } - logger.Info("Cluster backup resources were deleted", + l.Info("Cluster backup resources were deleted", "cluster ID", pg.Status.ID, ) @@ -893,7 +995,7 @@ func (r *PostgreSQLReconciler) handleDeleteCluster( r.Scheduler.RemoveJob(pg.GetJobID(scheduler.StatusChecker)) for _, ref := range pg.Spec.UserRefs { - err = r.handleUsersDetach(ctx, logger, pg, ref) + err = r.handleUsersDetach(ctx, l, pg, ref) if err != nil { return reconcile.Result{}, err } @@ -901,9 +1003,9 @@ func (r *PostgreSQLReconciler) handleDeleteCluster( controllerutil.RemoveFinalizer(pg, models.DeletionFinalizer) pg.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent - err = r.patchClusterMetadata(ctx, pg, logger) + err = r.patchClusterMetadata(ctx, pg, l) if err != nil { - logger.Error( + l.Error( err, "Cannot patch PostgreSQL resource metadata after finalizer removal", "cluster name", pg.Spec.Name, "cluster ID", pg.Status.ID, @@ -919,7 +1021,7 @@ func (r *PostgreSQLReconciler) handleDeleteCluster( err = r.deleteSecret(ctx, pg) if client.IgnoreNotFound(err) != nil { - logger.Error(err, "Cannot delete PostgreSQL default user secret", + l.Error(err, "Cannot delete PostgreSQL default user secret", "cluster ID", pg.Status.ID, ) @@ -931,7 +1033,7 @@ func (r *PostgreSQLReconciler) handleDeleteCluster( return reconcile.Result{}, err } - logger.Info("Cluster PostgreSQL default user secret was deleted", + l.Info("Cluster PostgreSQL default user secret was deleted", "cluster ID", pg.Status.ID, ) @@ -943,7 +1045,7 @@ func (r *PostgreSQLReconciler) handleDeleteCluster( err = exposeservice.Delete(r.Client, pg.Name, pg.Namespace) if err != nil { - logger.Error(err, "Cannot delete PostgreSQL cluster expose service", + l.Error(err, "Cannot delete PostgreSQL cluster expose service", "cluster ID", pg.Status.ID, "cluster name", pg.Spec.Name, ) @@ -951,7 +1053,7 @@ func (r *PostgreSQLReconciler) handleDeleteCluster( return reconcile.Result{}, err } - logger.Info("PostgreSQL cluster was deleted", + l.Info("PostgreSQL cluster was deleted", "cluster name", pg.Spec.Name, "cluster ID", pg.Status.ID, ) @@ -967,13 +1069,13 @@ func (r *PostgreSQLReconciler) handleDeleteCluster( func (r *PostgreSQLReconciler) handleUpdateDefaultUserPassword( ctx context.Context, pg *v1beta1.PostgreSQL, - logger logr.Logger, + l logr.Logger, ) (reconcile.Result, error) { - logger = logger.WithName("PostgreSQL default user password updating event") + l = l.WithName("PostgreSQL default user password updating event") secret, err := 
v1beta1.GetDefaultPgUserSecret(ctx, pg.Name, pg.Namespace, r.Client) if err != nil { - logger.Error(err, "Cannot get the default secret for the PostgreSQL cluster", + l.Error(err, "Cannot get the default secret for the PostgreSQL cluster", "cluster name", pg.Spec.Name, "cluster ID", pg.Status.ID, ) @@ -990,7 +1092,7 @@ func (r *PostgreSQLReconciler) handleUpdateDefaultUserPassword( password := string(secret.Data[models.Password]) isValid := pg.ValidateDefaultUserPassword(password) if !isValid { - logger.Error(err, "Default PostgreSQL user password is not valid. This field must be at least 8 characters long. Must contain characters from at least 3 of the following 4 categories: Uppercase, Lowercase, Numbers, Special Characters", + l.Error(err, "Default PostgreSQL user password is not valid. This field must be at least 8 characters long. Must contain characters from at least 3 of the following 4 categories: Uppercase, Lowercase, Numbers, Special Characters", "cluster name", pg.Spec.Name, "cluster ID", pg.Status.ID, ) @@ -1006,7 +1108,7 @@ func (r *PostgreSQLReconciler) handleUpdateDefaultUserPassword( err = r.API.UpdatePostgreSQLDefaultUserPassword(pg.Status.ID, password) if err != nil { - logger.Error(err, "Cannot update default PostgreSQL user password", + l.Error(err, "Cannot update default PostgreSQL user password", "cluster name", pg.Spec.Name, "cluster ID", pg.Status.ID, ) @@ -1021,9 +1123,9 @@ func (r *PostgreSQLReconciler) handleUpdateDefaultUserPassword( } pg.Annotations[models.ResourceStateAnnotation] = models.UpdatedEvent - err = r.patchClusterMetadata(ctx, pg, logger) + err = r.patchClusterMetadata(ctx, pg, l) if err != nil { - logger.Error(err, "Cannot patch PostgreSQL resource metadata", + l.Error(err, "Cannot patch PostgreSQL resource metadata", "cluster name", pg.Spec.Name, "cluster metadata", pg.ObjectMeta, ) @@ -1036,7 +1138,7 @@ func (r *PostgreSQLReconciler) handleUpdateDefaultUserPassword( return reconcile.Result{}, err } - logger.Info("PostgreSQL default user password was updated", + l.Info("PostgreSQL default user password was updated", "cluster name", pg.Spec.Name, "cluster ID", pg.Status.ID, ) @@ -1049,6 +1151,17 @@ func (r *PostgreSQLReconciler) handleUpdateDefaultUserPassword( return models.ExitReconcile, nil } +func (r *PostgreSQLReconciler) startClusterOnPremisesIPsJob(pg *v1beta1.PostgreSQL, b *onPremisesBootstrap) error { + job := newWatchOnPremisesIPsJob(pg.Kind, b) + + err := r.Scheduler.ScheduleJob(pg.GetJobID(scheduler.OnPremisesIPsChecker), scheduler.ClusterStatusInterval, job) + if err != nil { + return err + } + + return nil +} + func (r *PostgreSQLReconciler) startClusterStatusJob(pg *v1beta1.PostgreSQL) error { job := r.newWatchStatusJob(pg) @@ -1572,7 +1685,7 @@ func (r *PostgreSQLReconciler) reconcileClusterConfigurations( func (r *PostgreSQLReconciler) patchClusterMetadata( ctx context.Context, pgCluster *v1beta1.PostgreSQL, - logger logr.Logger, + l logr.Logger, ) error { patchRequest := []*v1beta1.PatchRequest{} @@ -1610,7 +1723,7 @@ func (r *PostgreSQLReconciler) patchClusterMetadata( return err } - logger.Info("PostgreSQL cluster patched", + l.Info("PostgreSQL cluster patched", "Cluster name", pgCluster.Spec.Name, "Finalizers", pgCluster.Finalizers, "Annotations", pgCluster.Annotations, diff --git a/controllers/clusters/redis_controller.go b/controllers/clusters/redis_controller.go index e11f6d03f..311e48b25 100644 --- a/controllers/clusters/redis_controller.go +++ b/controllers/clusters/redis_controller.go @@ -58,6 +58,14 @@ type 
RedisReconciler struct { //+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=redis/status,verbs=get;update;patch //+kubebuilder:rbac:groups=clusters.instaclustr.com,resources=redis/finalizers,verbs=update //+kubebuilder:rbac:groups="",resources=events,verbs=create;patch +//+kubebuilder:rbac:groups=cdi.kubevirt.io,resources=datavolumes,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachines,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=kubevirt.io,resources=virtualmachineinstances,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete;deletecollection // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. @@ -65,19 +73,19 @@ type RedisReconciler struct { // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.0/pkg/reconcile func (r *RedisReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - logger := log.FromContext(ctx) + l := log.FromContext(ctx) redis := &v1beta1.Redis{} err := r.Client.Get(ctx, req.NamespacedName, redis) if err != nil { if k8serrors.IsNotFound(err) { - logger.Info("Redis cluster resource is not found", + l.Info("Redis cluster resource is not found", "resource name", req.NamespacedName, ) return models.ExitReconcile, nil } - logger.Error(err, "Unable to fetch Redis cluster", + l.Error(err, "Unable to fetch Redis cluster", "resource name", req.NamespacedName, ) return models.ExitReconcile, nil @@ -85,19 +93,19 @@ func (r *RedisReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl switch redis.Annotations[models.ResourceStateAnnotation] { case models.CreatingEvent: - return r.handleCreateCluster(ctx, redis, logger) + return r.handleCreateCluster(ctx, redis, l) case models.UpdatingEvent: - return r.handleUpdateCluster(ctx, redis, logger) + return r.handleUpdateCluster(ctx, redis, l) case models.DeletingEvent: - return r.handleDeleteCluster(ctx, redis, logger) + return r.handleDeleteCluster(ctx, redis, l) case models.GenericEvent: - logger.Info("Redis generic event isn't handled", + l.Info("Redis generic event isn't handled", "cluster name", redis.Spec.Name, "request", req, ) return models.ExitReconcile, nil default: - logger.Info("Unknown event isn't handled", + l.Info("Unknown event isn't handled", "cluster name", redis.Spec.Name, "request", req, ) @@ -108,20 +116,20 @@ func (r *RedisReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl func (r *RedisReconciler) handleCreateCluster( ctx context.Context, redis *v1beta1.Redis, - logger logr.Logger, + l logr.Logger, ) (reconcile.Result, error) { var err error if redis.Status.ID == "" { var id string if redis.Spec.HasRestore() { - logger.Info( + l.Info( "Creating Redis cluster from backup", "original cluster ID", redis.Spec.RestoreFrom.ClusterID, ) id, 
err = r.API.RestoreCluster(redis.RestoreInfoToInstAPI(redis.Spec.RestoreFrom), models.RedisAppKind) if err != nil { - logger.Error( + l.Error( err, "Cannot restore Redis cluster from backup", "original cluster ID", redis.Spec.RestoreFrom.ClusterID, ) @@ -133,7 +141,7 @@ func (r *RedisReconciler) handleCreateCluster( return reconcile.Result{}, err } - logger.Info( + l.Info( "Redis cluster was created from backup", "original cluster ID", redis.Spec.RestoreFrom.ClusterID, ) @@ -145,7 +153,7 @@ func (r *RedisReconciler) handleCreateCluster( id, ) } else { - logger.Info( + l.Info( "Creating Redis cluster", "cluster name", redis.Spec.Name, "data centres", redis.Spec.DataCentres, @@ -153,7 +161,7 @@ func (r *RedisReconciler) handleCreateCluster( id, err = r.API.CreateCluster(instaclustr.RedisEndpoint, redis.Spec.ToInstAPI()) if err != nil { - logger.Error( + l.Error( err, "Cannot create Redis cluster", "cluster manifest", redis.Spec, ) @@ -165,7 +173,7 @@ func (r *RedisReconciler) handleCreateCluster( return reconcile.Result{}, err } - logger.Info( + l.Info( "Redis cluster was created", "cluster ID", id, "cluster name", redis.Spec.Name, @@ -181,7 +189,7 @@ func (r *RedisReconciler) handleCreateCluster( redis.Status.ID = id err = r.Status().Patch(ctx, redis, patch) if err != nil { - logger.Error(err, "Cannot patch Redis cluster status", + l.Error(err, "Cannot patch Redis cluster status", "cluster name", redis.Spec.Name) r.EventRecorder.Eventf(redis, models.Warning, models.PatchFailed, "Cluster resource status patch is failed. Reason: %v", err) @@ -189,7 +197,7 @@ func (r *RedisReconciler) handleCreateCluster( return reconcile.Result{}, err } - logger.Info("Redis resource has been created", + l.Info("Redis resource has been created", "cluster name", redis.Name, "cluster ID", redis.Status.ID, "api version", redis.APIVersion) @@ -200,7 +208,7 @@ func (r *RedisReconciler) handleCreateCluster( redis.Annotations[models.ResourceStateAnnotation] = models.CreatedEvent err = r.Patch(ctx, redis, patch) if err != nil { - logger.Error(err, "Cannot patch Redis cluster", + l.Error(err, "Cannot patch Redis cluster", "cluster name", redis.Spec.Name, "cluster metadata", redis.ObjectMeta, ) @@ -215,7 +223,7 @@ func (r *RedisReconciler) handleCreateCluster( if redis.Status.State != models.DeletedStatus { err = r.startClusterStatusJob(redis) if err != nil { - logger.Error(err, "Cannot start cluster status job", + l.Error(err, "Cannot start cluster status job", "redis cluster ID", redis.Status.ID, ) @@ -232,9 +240,87 @@ func (r *RedisReconciler) handleCreateCluster( "Cluster status check job is started", ) + if redis.Spec.OnPremisesSpec != nil { + iData, err := r.API.GetRedis(redis.Status.ID) + if err != nil { + l.Error(err, "Cannot get cluster from the Instaclustr API", + "cluster name", redis.Spec.Name, + "data centres", redis.Spec.DataCentres, + "cluster ID", redis.Status.ID, + ) + r.EventRecorder.Eventf( + redis, models.Warning, models.FetchFailed, + "Cluster fetch from the Instaclustr API is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + iRedis, err := redis.FromInstAPI(iData) + if err != nil { + l.Error( + err, "Cannot convert cluster from the Instaclustr API", + "cluster name", redis.Spec.Name, + "cluster ID", redis.Status.ID, + ) + r.EventRecorder.Eventf( + redis, models.Warning, models.ConversionFailed, + "Cluster convertion from the Instaclustr API to k8s resource is failed. 
Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + bootstrap := newOnPremisesBootstrap( + r.Client, + redis, + r.EventRecorder, + iRedis.Status.ClusterStatus, + redis.Spec.OnPremisesSpec, + newExposePorts(redis.GetExposePorts()), + redis.GetHeadlessPorts(), + redis.Spec.PrivateNetworkCluster, + ) + + err = handleCreateOnPremisesClusterResources(ctx, bootstrap) + if err != nil { + l.Error( + err, "Cannot create resources for on-premises cluster", + "cluster spec", redis.Spec.OnPremisesSpec, + ) + r.EventRecorder.Eventf( + redis, models.Warning, models.CreationFailed, + "Resources creation for on-premises cluster is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + err = r.startClusterOnPremisesIPsJob(redis, bootstrap) + if err != nil { + l.Error(err, "Cannot start on-premises cluster IPs check job", + "cluster ID", redis.Status.ID, + ) + + r.EventRecorder.Eventf( + redis, models.Warning, models.CreationFailed, + "On-premises cluster IPs check job is failed. Reason: %v", + err, + ) + return reconcile.Result{}, err + } + + l.Info( + "On-premises resources have been created", + "cluster name", redis.Spec.Name, + "on-premises Spec", redis.Spec.OnPremisesSpec, + "cluster ID", redis.Status.ID, + ) + return models.ExitReconcile, nil + } + err = r.startClusterBackupsJob(redis) if err != nil { - logger.Error(err, "Cannot start Redis cluster backups check job", + l.Error(err, "Cannot start Redis cluster backups check job", "cluster ID", redis.Status.ID, ) @@ -255,7 +341,7 @@ func (r *RedisReconciler) handleCreateCluster( err = r.startUsersCreationJob(redis) if err != nil { - logger.Error(err, "Failed to start user creation job") + l.Error(err, "Failed to start user creation job") r.EventRecorder.Eventf(redis, models.Warning, models.CreationFailed, "User creation job is failed. Reason: %v", err, ) @@ -267,7 +353,7 @@ func (r *RedisReconciler) handleCreateCluster( } } - logger.Info( + l.Info( "Redis resource has been created", "cluster name", redis.Name, "cluster ID", redis.Status.ID, @@ -293,11 +379,11 @@ func (r *RedisReconciler) startUsersCreationJob(cluster *v1beta1.Redis) error { func (r *RedisReconciler) handleUpdateCluster( ctx context.Context, redis *v1beta1.Redis, - logger logr.Logger, + l logr.Logger, ) (reconcile.Result, error) { iData, err := r.API.GetRedis(redis.Status.ID) if err != nil { - logger.Error( + l.Error( err, "Cannot get Redis cluster from the Instaclustr API", "cluster name", redis.Spec.Name, "cluster ID", redis.Status.ID, @@ -313,14 +399,14 @@ func (r *RedisReconciler) handleUpdateCluster( iRedis, err := redis.FromInstAPI(iData) if err != nil { - logger.Error( + l.Error( err, "Cannot convert Redis cluster from the Instaclustr API", "cluster name", redis.Spec.Name, "cluster ID", redis.Status.ID, ) r.EventRecorder.Eventf( - redis, models.Warning, models.ConvertionFailed, + redis, models.Warning, models.ConversionFailed, "Cluster convertion from the Instaclustr API to k8s resource is failed. 
Reason: %v", err, ) @@ -328,17 +414,17 @@ func (r *RedisReconciler) handleUpdateCluster( } if redis.Annotations[models.ExternalChangesAnnotation] == models.True { - return r.handleExternalChanges(redis, iRedis, logger) + return r.handleExternalChanges(redis, iRedis, l) } if redis.Spec.ClusterSettingsNeedUpdate(iRedis.Spec.Cluster) { - logger.Info("Updating cluster settings", + l.Info("Updating cluster settings", "instaclustr description", iRedis.Spec.Description, "instaclustr two factor delete", iRedis.Spec.TwoFactorDelete) err = r.API.UpdateClusterSettings(redis.Status.ID, redis.Spec.ClusterSettingsUpdateToInstAPI()) if err != nil { - logger.Error(err, "Cannot update cluster settings", + l.Error(err, "Cannot update cluster settings", "cluster ID", redis.Status.ID, "cluster spec", redis.Spec) r.EventRecorder.Eventf(redis, models.Warning, models.UpdateFailed, "Cannot update cluster settings. Reason: %v", err) @@ -348,14 +434,14 @@ func (r *RedisReconciler) handleUpdateCluster( } if !redis.Spec.IsEqual(iRedis.Spec) { - logger.Info("Update request to Instaclustr API has been sent", + l.Info("Update request to Instaclustr API has been sent", "spec data centres", redis.Spec.DataCentres, "resize settings", redis.Spec.ResizeSettings, ) err = r.API.UpdateRedis(redis.Status.ID, redis.Spec.DCsUpdateToInstAPI()) if err != nil { - logger.Error(err, "Cannot update Redis cluster data centres", + l.Error(err, "Cannot update Redis cluster data centres", "cluster name", redis.Spec.Name, "cluster status", redis.Status, "data centres", redis.Spec.DataCentres, @@ -370,7 +456,7 @@ func (r *RedisReconciler) handleUpdateCluster( patch := redis.NewPatch() redis.Annotations[models.UpdateQueuedAnnotation] = models.True if err := r.Patch(ctx, redis, patch); err != nil { - logger.Error(err, "Cannot patch metadata", + l.Error(err, "Cannot patch metadata", "cluster name", redis.Spec.Name, "cluster metadata", redis.ObjectMeta, ) @@ -388,7 +474,7 @@ func (r *RedisReconciler) handleUpdateCluster( err = handleUsersChanges(ctx, r.Client, r, redis) if err != nil { - logger.Error(err, "Failed to handle users changes") + l.Error(err, "Failed to handle users changes") r.EventRecorder.Eventf(redis, models.Warning, models.PatchFailed, "Handling users changes is failed. 
Reason: %w", err, ) @@ -400,7 +486,7 @@ func (r *RedisReconciler) handleUpdateCluster( redis.Annotations[models.UpdateQueuedAnnotation] = "" err = r.Patch(ctx, redis, patch) if err != nil { - logger.Error(err, "Cannot patch Redis cluster after update", + l.Error(err, "Cannot patch Redis cluster after update", "cluster name", redis.Spec.Name, "cluster metadata", redis.ObjectMeta, ) @@ -413,7 +499,7 @@ func (r *RedisReconciler) handleUpdateCluster( return reconcile.Result{}, err } - logger.Info( + l.Info( "Cluster has been updated", "cluster name", redis.Spec.Name, "cluster ID", redis.Status.ID, @@ -463,12 +549,12 @@ func (r *RedisReconciler) handleExternalChanges(redis, iRedis *v1beta1.Redis, l func (r *RedisReconciler) handleDeleteCluster( ctx context.Context, redis *v1beta1.Redis, - logger logr.Logger, + l logr.Logger, ) (reconcile.Result, error) { _, err := r.API.GetRedis(redis.Status.ID) if err != nil && !errors.Is(err, instaclustr.NotFound) { - logger.Error(err, "Cannot get Redis cluster status from Instaclustr", + l.Error(err, "Cannot get Redis cluster status from Instaclustr", "cluster ID", redis.Status.ID, "cluster name", redis.Spec.Name, ) @@ -482,13 +568,13 @@ func (r *RedisReconciler) handleDeleteCluster( } if !errors.Is(err, instaclustr.NotFound) { - logger.Info("Sending cluster deletion to the Instaclustr API", + l.Info("Sending cluster deletion to the Instaclustr API", "cluster name", redis.Spec.Name, "cluster ID", redis.Status.ID) err = r.API.DeleteCluster(redis.Status.ID, instaclustr.RedisEndpoint) if err != nil { - logger.Error(err, "Cannot delete Redis cluster", + l.Error(err, "Cannot delete Redis cluster", "cluster name", redis.Spec.Name, "cluster status", redis.Status.State, ) @@ -511,7 +597,7 @@ func (r *RedisReconciler) handleDeleteCluster( redis.Annotations[models.ClusterDeletionAnnotation] = models.Triggered err = r.Patch(ctx, redis, patch) if err != nil { - logger.Error(err, "Cannot patch cluster resource", + l.Error(err, "Cannot patch cluster resource", "cluster name", redis.Spec.Name, "cluster state", redis.Status.State) r.EventRecorder.Eventf(redis, models.Warning, models.PatchFailed, @@ -521,25 +607,41 @@ func (r *RedisReconciler) handleDeleteCluster( return reconcile.Result{}, err } - logger.Info(msgDeleteClusterWithTwoFactorDelete, "cluster ID", redis.Status.ID) + l.Info(msgDeleteClusterWithTwoFactorDelete, "cluster ID", redis.Status.ID) r.EventRecorder.Event(redis, models.Normal, models.DeletionStarted, "Two-Factor Delete is enabled, please confirm cluster deletion via email or phone.") return models.ExitReconcile, nil } + if redis.Spec.OnPremisesSpec != nil { + err = deleteOnPremResources(ctx, r.Client, redis.Status.ID, redis.Namespace) + if err != nil { + l.Error(err, "Cannot delete cluster on-premises resources", + "cluster ID", redis.Status.ID) + r.EventRecorder.Eventf(redis, models.Warning, models.DeletionFailed, + "Cluster on-premises resources deletion is failed. 
Reason: %v", err) + return reconcile.Result{}, err + } + + l.Info("Cluster on-premises resources are deleted", + "cluster ID", redis.Status.ID) + r.EventRecorder.Eventf(redis, models.Normal, models.Deleted, + "Cluster on-premises resources are deleted") + r.Scheduler.RemoveJob(redis.GetJobID(scheduler.OnPremisesIPsChecker)) + } } r.Scheduler.RemoveJob(redis.GetJobID(scheduler.StatusChecker)) r.Scheduler.RemoveJob(redis.GetJobID(scheduler.BackupsChecker)) - logger.Info("Deleting cluster backup resources", + l.Info("Deleting cluster backup resources", "cluster ID", redis.Status.ID, ) err = r.deleteBackups(ctx, redis.Status.ID, redis.Namespace) if err != nil { - logger.Error(err, "Cannot delete cluster backup resources", + l.Error(err, "Cannot delete cluster backup resources", "cluster ID", redis.Status.ID, ) r.EventRecorder.Eventf( @@ -555,13 +657,13 @@ func (r *RedisReconciler) handleDeleteCluster( "Cluster backup resources are deleted", ) - logger.Info("Cluster backup resources are deleted", + l.Info("Cluster backup resources are deleted", "cluster ID", redis.Status.ID, ) err = detachUsers(ctx, r.Client, r, redis) if err != nil { - logger.Error(err, "Failed to detach users from the cluster") + l.Error(err, "Failed to detach users from the cluster") r.EventRecorder.Eventf(redis, models.Warning, models.DeletionFailed, "Detaching users from the cluster is failed. Reason: %w", err, ) @@ -573,7 +675,7 @@ func (r *RedisReconciler) handleDeleteCluster( redis.Annotations[models.ResourceStateAnnotation] = models.DeletedEvent err = r.Patch(ctx, redis, patch) if err != nil { - logger.Error(err, "Cannot patch Redis cluster metadata after finalizer removal", + l.Error(err, "Cannot patch Redis cluster metadata after finalizer removal", "cluster name", redis.Spec.Name, "cluster ID", redis.Status.ID, ) @@ -588,7 +690,7 @@ func (r *RedisReconciler) handleDeleteCluster( err = exposeservice.Delete(r.Client, redis.Name, redis.Namespace) if err != nil { - logger.Error(err, "Cannot delete Redis cluster expose service", + l.Error(err, "Cannot delete Redis cluster expose service", "cluster ID", redis.Status.ID, "cluster name", redis.Spec.Name, ) @@ -596,7 +698,7 @@ func (r *RedisReconciler) handleDeleteCluster( return reconcile.Result{}, err } - logger.Info("Redis cluster was deleted", + l.Info("Redis cluster was deleted", "cluster name", redis.Spec.Name, "cluster ID", redis.Status.ID, ) @@ -609,6 +711,17 @@ func (r *RedisReconciler) handleDeleteCluster( return models.ExitReconcile, nil } +func (r *RedisReconciler) startClusterOnPremisesIPsJob(redis *v1beta1.Redis, b *onPremisesBootstrap) error { + job := newWatchOnPremisesIPsJob(redis.Kind, b) + + err := r.Scheduler.ScheduleJob(redis.GetJobID(scheduler.OnPremisesIPsChecker), scheduler.ClusterStatusInterval, job) + if err != nil { + return err + } + + return nil +} + func (r *RedisReconciler) startClusterStatusJob(cluster *v1beta1.Redis) error { job := r.newWatchStatusJob(cluster) @@ -632,7 +745,7 @@ func (r *RedisReconciler) startClusterBackupsJob(cluster *v1beta1.Redis) error { } func (r *RedisReconciler) newUsersCreationJob(redis *v1beta1.Redis) scheduler.Job { - logger := log.Log.WithValues("component", "redisUsersCreationJob") + l := log.Log.WithValues("component", "redisUsersCreationJob") return func() error { ctx := context.Background() @@ -649,7 +762,7 @@ func (r *RedisReconciler) newUsersCreationJob(redis *v1beta1.Redis) scheduler.Jo } if redis.Status.State != models.RunningStatus { - logger.Info("User creation job is scheduled") + l.Info("User creation 
job is scheduled") r.EventRecorder.Eventf(redis, models.Normal, models.CreationFailed, "User creation job is scheduled, cluster is not in the running state", ) @@ -658,14 +771,14 @@ func (r *RedisReconciler) newUsersCreationJob(redis *v1beta1.Redis) scheduler.Jo err = handleUsersChanges(ctx, r.Client, r, redis) if err != nil { - logger.Error(err, "Failed to create users") + l.Error(err, "Failed to create users") r.EventRecorder.Eventf(redis, models.Warning, models.PatchFailed, "Creating users is failed. Reason: %w", err, ) return err } - logger.Info("User creation job successfully finished") + l.Info("User creation job successfully finished") r.EventRecorder.Eventf(redis, models.Normal, models.Created, "User creation job successfully finished", ) diff --git a/main.go b/main.go index f11e46971..c29ce497b 100644 --- a/main.go +++ b/main.go @@ -22,13 +22,12 @@ import ( "time" "go.uber.org/zap/zapcore" + "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" - - "k8s.io/apimachinery/pkg/runtime" - // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) - // to ensure that exec-entrypoint and run can make use of them. _ "k8s.io/client-go/plugin/pkg/client/auth" + virtcorev1 "kubevirt.io/api/core/v1" + cdiv1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log" @@ -56,6 +55,8 @@ func init() { utilruntime.Must(clustersv1beta1.AddToScheme(scheme)) utilruntime.Must(clusterresourcesv1beta1.AddToScheme(scheme)) utilruntime.Must(kafkamanagementv1beta1.AddToScheme(scheme)) + utilruntime.Must(cdiv1beta1.AddToScheme(scheme)) + utilruntime.Must(virtcorev1.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme } diff --git a/pkg/models/on_premises.go b/pkg/models/on_premises.go new file mode 100644 index 000000000..3d0dbd278 --- /dev/null +++ b/pkg/models/on_premises.go @@ -0,0 +1,93 @@ +package models + +const ( + ONPREMISES = "ONPREMISES" + CLIENTDC = "CLIENT_DC" + + VirtualMachineKind = "VirtualMachine" + DVKind = "DataVolume" + ServiceKind = "Service" + KubevirtV1APIVersion = "kubevirt.io/v1" + CDIKubevirtV1beta1APIVersion = "cdi.kubevirt.io/v1beta1" + + KubevirtSubdomain = "kubevirt" + KubevirtDomainLabel = "kubevirt.io/domain" + NodeIDLabel = "nodeID" + NodeRackLabel = "nodeRack" + NodeLabel = "node" + NodeOSDVPrefix = "node-os-data-volume-pvc" + NodeDVPrefix = "node-data-volume-pvc" + NodeVMPrefix = "node-vm" + NodeSvcPrefix = "node-service" + WorkerNode = "worker-node" + GatewayDVPrefix = "gateway-data-volume-pvc" + GatewayVMPrefix = "gateway-vm" + GatewaySvcPrefix = "gateway-service" + GatewayRack = "ssh-gateway-rack" + IgnitionScriptSecretPrefix = "ignition-script-secret" + DataDisk = "data-disk" + + Boot = "boot" + Storage = "storage" + CPU = "cpu" + Memory = "memory" + Virtio = "virtio" + Native = "native" + None = "none" + Script = "script" + IgnitionDisk = "ignition" + Default = "default" + CloudInit = "cloud-init" + DataDiskSerial = "DATADISK" + IgnitionSerial = "IGNITION" + + LBType = "LoadBalancer" + SSH = "ssh" + Port22 = 22 + + // Cassandra + + CassandraInterNode = "cassandra-inter-node" + CassandraSSL = "cassandra-ssl" + CassandraCQL = "cassandra-cql" + CassandraJMX = "cassandra-jmx" + Port7000 = 7000 + Port7001 = 7001 + Port7199 = 7199 + Port9042 = 9042 + + // Kafka + + KafkaClient = "kafka-client" + KafkaControlPlane = "kafka-control-plane" + 
KafkaBroker = "kafka-broker" + Port9092 = 9092 + Port9093 = 9093 + Port9094 = 9094 + + // KafkaConnect + + KafkaConnectAPI = "kafka-connect-API" + Port8083 = 8083 + + // Cadence + + CadenceTChannel = "cadence-tchannel" + CadenceGRPC = "cadence-grpc" + CadenceWeb = "cadence-web" + Port7933 = 7933 + Port7833 = 7833 + Port8088 = 8088 + + // PostgreSQL + + PostgreSQLDB = "postgresql-db" + Port5432 = 5432 + + // Redis + + RedisDB = "redis-db" + RedisBus = "redis-bus" + Port6379 = 6379 + Port16379 = 16379 +) diff --git a/pkg/models/operator.go b/pkg/models/operator.go index 3b46ad639..399d61c49 100644 --- a/pkg/models/operator.go +++ b/pkg/models/operator.go @@ -132,7 +132,7 @@ const ( CreationFailed = "CreationFailed" FetchFailed = "FetchFailed" GenerateFailed = "GenerateFailed" - ConvertionFailed = "ConvertionFailed" + ConversionFailed = "ConversionFailed" ValidationFailed = "ValidationFailed" UpdateFailed = "UpdateFailed" ExternalChanges = "ExternalChanges" diff --git a/pkg/models/validation.go b/pkg/models/validation.go index d8afd0257..410275d55 100644 --- a/pkg/models/validation.go +++ b/pkg/models/validation.go @@ -59,6 +59,8 @@ var ( DependencyVPCs = []string{"TARGET_VPC", "VPC_PEERED", "SEPARATE_VPC"} EncryptionKeyAliasRegExp = "^[a-zA-Z0-9_-]{1}[a-zA-Z0-9 _-]*$" OpenSearchBindingIDPattern = "[\\w-]+" + MemoryRegExp = "^\\d+(Ei|Pi|Ti|Gi|Mi|Ki)?$" + StorageRegExp = "^\\d+(Gi|Ti|Pi|Ei)?$" CassandraReplicationFactors = []int{2, 3, 5} KafkaReplicationFactors = []int{3, 5} diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index 0d025b664..a8622ed43 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -28,9 +28,12 @@ var ClusterStatusInterval time.Duration var ClusterBackupsInterval time.Duration var UserCreationInterval time.Duration -const StatusChecker = "statusChecker" -const BackupsChecker = "backupsChecker" -const UserCreator = "userCreator" +const ( + StatusChecker = "statusChecker" + BackupsChecker = "backupsChecker" + UserCreator = "userCreator" + OnPremisesIPsChecker = "onPremisesIPsChecker" +) type Job func() error diff --git a/scripts/cloud-init-script-example.sh b/scripts/cloud-init-script-example.sh new file mode 100644 index 000000000..e01c92a3c --- /dev/null +++ b/scripts/cloud-init-script-example.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +export NEW_PASS="qwerty12345" +export SSH_PUB_KEY="ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC9/SUQIwJ58Fl67lMse4YIvWTjW5xdvwLkCXvWHcnxgoY/84WmpiU58X70mlEZTgC0C5QM6HOdDfuGsE8joUDLsBa3i0SQnK33gIy6tMYxRcmhN4E2W4JOjCkefoDD00wZaiVO4oEYun9nRRx0G9ICPHXRjMnK860mnY2WBXQyzvELrjcHJqBj8fE5xODtGULF/feusYaUsdMhVKM2lVI9WPtLyoDaaTxnd/GgBhy+LiixqrhDhOoSG8ng3yfLvs1nso71WD4J/FDUmVn5WuwCckSRYeCBuYWDpGUbu8WEtrCu6bW2jHTQyWIv0XiNgzkmqG6SySzrDx4nQoOJtqbaFT+lLzmp2ZWD9skTdWNSAIQLCRPeHUPQ9e9DXXFSfIQ8d8AgiBTobeeiWxXZ4qu+85Dbr/YgUMi0RZu9uB0YuGCzpYrqkPHSOQtihqUXW8CzoAcr8r5HEGPEcG+gW2u+sRpNW3msMelxLtDprI3EwmhvKQ/aWyj9wFCqgrO9SJc= danil@danil-mint" +export BOOTSTRAP_SSH_KEY="ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAAEAQDgeaO3zkY5v1dww3fFONPzUnEgIqJ4kUK0Usu8iFdp+TWIulw9dDeQHa+PdWXP97l5Vv1mG9ipqShEIu7/2bp13KxSblWX4iV1MYZbtimhY3UDOsPn1G3E1Ipis6y+/tosDAm8LoWaGEMcLuE5UjP6gs6K57rCEjkVGjg7vjhIypAMC0J2N2+CxK9o/Y1+LZAec+FL5cmSJoajoo9y/VYJjz/a52jJ93wRafD2uu6ObAl5gkN/+gqY4IJFSMx20sPsIRXdbiBNDqiap56ibHHPKTeZhRbdXvZfjYkHtutXnSM2xn7BjnV8CguISxS3rXlzlzRVYwSUjnKUf5SKBbeyZbCokx271vCeUd5EXfHphvW6FIOna2AI5lpCSYkw5Kho3HaPi2NjXJ9i2dCr1zpcZpCiettDuaEjxR0Cl4Jd6PrAiAEZ0Ns0u2ysVhudshVzQrq6qdd7W9/MLjbDIHdTToNjFLZA6cbE0MQf18LXwJAl+G/QrXgcVaiopUmld+68JL89Xym55LzkMhI0NiDtWufawd/NiZ6jm13Z3+atvnOimdyuqBYeFWgbtcxjs0yN9v7h7PfPh6TbiQYFx37DCfQiIucqx1GWmMijmd7HMY6Nv3UvnoTUTSn4yz1NxhEpC61N+iAInZDpeJEtULSzlEMWlbzL4t5cF+Rm1dFdq3WpZt1mi8F4DgrsgZEuLGAw22RNW3++EWYFUNnJXaYyctPrMpWQktr4DB5nQGIHF92WR8uncxTNUXfWuT29O9e+bFYh1etmq8rsCoLcxN0zFHWvcECK55aE+47lfNAR+HEjuwYW10mGU/pFmO0F9FFmcQRSw4D4tnVUgl3XjKe3bBiTa4lUrzrKkLZ6n9/buW2e7c3jbjmXdPh2R+2Msr/vfuWs9glxQf+CYEbBW6Ye4pekIyI77SaB/bVhaHtXutKxm+QWdNle8aeqiA8Ji1Ml+s75vIg+n5v6viCnl5aV33xHRFpGQJzj2ktsXl9P9d5kgal9eXJYTywC2SnVbZVLb6FGN4kPZTVwX1f+u7v7JCm4YWlbQZtwwiXKjs99AVtQnBWqQvUH5sFUkVXlHA1Y9W6wlup0r+F6URL+7Yw+d0dHByfevrJg3pvmpLb3sEpjIAZodW3dIUReE7Ku3s/q/O9foFnfRBnCcZ2QsnxI5pqNrbrundD1ApOnNXEvICvPXHBBQ44cW0hzAO+WxY5VxyG8y/kXnb48G9efkIQFkNaITJrU9SiOk6bFP4QANdS/pmaSLjJIsHixa+7vmYjRy1SVoQ/39vDUnyCbqKtO56QMH32hQLRO3Vk7NVG6o4dYjFkiaMSaqVlHKMkJQHVzlK2PW9/fjVXfkAHmmhoD debian" +echo "debian:$NEW_PASS" | chpasswd +echo "root:$NEW_PASS" | sudo chpasswd root +sudo echo "$SSH_PUB_KEY" > /home/debian/.ssh/authorized_keys +sudo echo "$BOOTSTRAP_SSH_KEY" >> /home/debian/.ssh/authorized_keys +sudo chown -R debian: /home/debian/.ssh +sudo cp /usr/share/doc/apt/examples/sources.list /etc/apt/sources.list +data_device=$(lsblk -dfn -o NAME,SERIAL | awk '$2 == "DATADISK" {print $1}') +sudo mkfs -t ext4 /dev/"${data_device}" \ No newline at end of file diff --git a/scripts/cloud-init-secret.yaml b/scripts/cloud-init-secret.yaml new file mode 100644 index 000000000..ef7ad5d0d --- /dev/null +++ b/scripts/cloud-init-secret.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Secret +metadata: + name: instaclustr-cloud-init-secret +data: + userdata: 
IyEvYmluL2Jhc2gKCmV4cG9ydCBORVdfUEFTUz0icXdlcnR5MTIzNDUiCmV4cG9ydCBTU0hfUFVCX0tFWT0ic3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFCZ1FDOS9TVVFJd0o1OEZsNjdsTXNlNFlJdldUalc1eGR2d0xrQ1h2V0hjbnhnb1kvODRXbXBpVTU4WDcwbWxFWlRnQzBDNVFNNkhPZERmdUdzRThqb1VETHNCYTNpMFNRbkszM2dJeTZ0TVl4UmNtaE40RTJXNEpPakNrZWZvREQwMHdaYWlWTzRvRVl1bjluUlJ4MEc5SUNQSFhSak1uSzg2MG1uWTJXQlhReXp2RUxyamNISnFCajhmRTV4T0R0R1VMRi9mZXVzWWFVc2RNaFZLTTJsVkk5V1B0THlvRGFhVHhuZC9HZ0JoeStMaWl4cXJoRGhPb1NHOG5nM3lmTHZzMW5zbzcxV0Q0Si9GRFVtVm41V3V3Q2NrU1JZZUNCdVlXRHBHVWJ1OFdFdHJDdTZiVzJqSFRReVdJdjBYaU5nemttcUc2U3lTenJEeDRuUW9PSnRxYmFGVCtsTHptcDJaV0Q5c2tUZFdOU0FJUUxDUlBlSFVQUTllOURYWEZTZklROGQ4QWdpQlRvYmVlaVd4WFo0cXUrODVEYnIvWWdVTWkwUlp1OXVCMFl1R0N6cFlycWtQSFNPUXRpaHFVWFc4Q3pvQWNyOHI1SEVHUEVjRytnVzJ1K3NScE5XM21zTWVseEx0RHBySTNFd21odktRL2FXeWo5d0ZDcWdyTzlTSmM9IGRhbmlsQGRhbmlsLW1pbnQiCmV4cG9ydCBCT09UU1RSQVBfU1NIX0tFWT0ic3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFFQVFEZ2VhTzN6a1k1djFkd3czZkZPTlB6VW5FZ0lxSjRrVUswVXN1OGlGZHArVFdJdWx3OWREZVFIYStQZFdYUDk3bDVWdjFtRzlpcHFTaEVJdTcvMmJwMTNLeFNibFdYNGlWMU1ZWmJ0aW1oWTNVRE9zUG4xRzNFMUlwaXM2eSsvdG9zREFtOExvV2FHRU1jTHVFNVVqUDZnczZLNTdyQ0Vqa1ZHamc3dmpoSXlwQU1DMEoyTjIrQ3hLOW8vWTErTFpBZWMrRkw1Y21TSm9ham9vOXkvVllKanovYTUyako5M3dSYWZEMnV1Nk9iQWw1Z2tOLytncVk0SUpGU014MjBzUHNJUlhkYmlCTkRxaWFwNTZpYkhIUEtUZVpoUmJkWHZaZmpZa0h0dXRYblNNMnhuN0JqblY4Q2d1SVN4UzNyWGx6bHpSVll3U1VqbktVZjVTS0JiZXlaYkNva3gyNzF2Q2VVZDVFWGZIcGh2VzZGSU9uYTJBSTVscENTWWt3NUtobzNIYVBpMk5qWEo5aTJkQ3IxenBjWnBDaWV0dER1YUVqeFIwQ2w0SmQ2UHJBaUFFWjBOczB1MnlzVmh1ZHNoVnpRcnE2cWRkN1c5L01MamJESUhkVFRvTmpGTFpBNmNiRTBNUWYxOExYd0pBbCtHL1FyWGdjVmFpb3BVbWxkKzY4Skw4OVh5bTU1THprTWhJME5pRHRXdWZhd2QvTmlaNmptMTNaMythdHZuT2ltZHl1cUJZZUZXZ2J0Y3hqczB5Tjl2N2g3UGZQaDZUYmlRWUZ4MzdEQ2ZRaUl1Y3F4MUdXbU1pam1kN0hNWTZOdjNVdm5vVFVUU240eXoxTnhoRXBDNjFOK2lBSW5aRHBlSkV0VUxTemxFTVdsYnpMNHQ1Y0YrUm0xZEZkcTNXcFp0MW1pOEY0RGdyc2daRXVMR0F3MjJSTlczKytFV1lGVU5uSlhhWXljdFByTXBXUWt0cjREQjVuUUdJSEY5MldSOHVuY3hUTlVYZld1VDI5TzllK2JGWWgxZXRtcThyc0NvTGN4TjB6RkhXdmNFQ0s1NWFFKzQ3bGZOQVIrSEVqdXdZVzEwbUdVL3BGbU8wRjlGRm1jUVJTdzRENHRuVlVnbDNYaktlM2JCaVRhNGxVcnpyS2tMWjZuOS9idVcyZTdjM2piam1YZFBoMlIrMk1zci92ZnVXczlnbHhRZitDWUViQlc2WWU0cGVrSXlJNzdTYUIvYlZoYUh0WHV0S3htK1FXZE5sZThhZXFpQThKaTFNbCtzNzV2SWcrbjV2NnZpQ25sNWFWMzN4SFJGcEdRSnpqMmt0c1hsOVA5ZDVrZ2FsOWVYSllUeXdDMlNuVmJaVkxiNkZHTjRrUFpUVndYMWYrdTd2N0pDbTRZV2xiUVp0d3dpWEtqczk5QVZ0UW5CV3FRdlVINXNGVWtWWGxIQTFZOVc2d2x1cDByK0Y2VVJMKzdZdytkMGRIQnlmZXZySmczcHZtcExiM3NFcGpJQVpvZFczZElVUmVFN0t1M3MvcS9POWZvRm5mUkJuQ2NaMlFzbnhJNXBxTnJicnVuZEQxQXBPbk5YRXZJQ3ZQWEhCQlE0NGNXMGh6QU8rV3hZNVZ4eUc4eS9rWG5iNDhHOWVma0lRRmtOYUlUSnJVOVNpT2s2YkZQNFFBTmRTL3BtYVNMakpJc0hpeGErN3ZtWWpSeTFTVm9RLzM5dkRVbnlDYnFLdE81NlFNSDMyaFFMUk8zVms3TlZHNm80ZFlqRmtpYU1TYXFWbEhLTWtKUUhWemxLMlBXOS9malZYZmtBSG1taG9EIGRlYmlhbiIKZWNobyAiZGViaWFuOiRORVdfUEFTUyIgfCBjaHBhc3N3ZAplY2hvICJyb290OiRORVdfUEFTUyIgfCBzdWRvIGNocGFzc3dkIHJvb3QKc3VkbyBlY2hvICIkU1NIX1BVQl9LRVkiID4gL2hvbWUvZGViaWFuLy5zc2gvYXV0aG9yaXplZF9rZXlzCnN1ZG8gZWNobyAiJEJPT1RTVFJBUF9TU0hfS0VZIiA+PiAvaG9tZS9kZWJpYW4vLnNzaC9hdXRob3JpemVkX2tleXMKc3VkbyBjaG93biAtUiBkZWJpYW46IC9ob21lL2RlYmlhbi8uc3NoCnN1ZG8gY3AgL3Vzci9zaGFyZS9kb2MvYXB0L2V4YW1wbGVzL3NvdXJjZXMubGlzdCAvZXRjL2FwdC9zb3VyY2VzLmxpc3QKZGF0YV9kZXZpY2U9JChsc2JsayAtZGZuIC1vIE5BTUUsU0VSSUFMIHwgYXdrICckMiA9PSAiREFUQURJU0siIHtwcmludCAkMX0nKQpzdWRvIG1rZnMgLXQgZXh0NCAvZGV2LyIke2RhdGFfZGV2aWNlfSI= \ No newline at end of file From 9d81f9f8c9ead939edd3c0aecac3cb5fd91dda6a Mon Sep 17 00:00:00 2001 From: Rostislav Porohnya Date: Wed, 22 Nov 2023 19:15:39 +0200 Subject: [PATCH 3/4] vendor --- go.mod | 45 +- 
go.sum | 65 +- .../github.com/PuerkitoBio/purell/.gitignore | 5 - .../github.com/PuerkitoBio/purell/.travis.yml | 12 - vendor/github.com/PuerkitoBio/purell/LICENSE | 12 - .../github.com/PuerkitoBio/purell/README.md | 188 -- .../github.com/PuerkitoBio/purell/purell.go | 379 --- .../github.com/PuerkitoBio/urlesc/.travis.yml | 15 - .../github.com/PuerkitoBio/urlesc/README.md | 16 - .../github.com/PuerkitoBio/urlesc/urlesc.go | 180 -- .../github.com/emicklei/go-restful/Makefile | 5 - .../emicklei/go-restful/{ => v3}/.gitignore | 0 .../emicklei/go-restful/v3/.goconvey | 1 + .../emicklei/go-restful/{ => v3}/.travis.yml | 0 .../emicklei/go-restful/{ => v3}/CHANGES.md | 196 +- .../emicklei/go-restful/{ => v3}/LICENSE | 0 .../emicklei/go-restful/v3/Makefile | 8 + .../emicklei/go-restful/{ => v3}/README.md | 30 +- .../emicklei/go-restful/v3/SECURITY.md | 13 + .../emicklei/go-restful/{ => v3}/Srcfile | 0 .../go-restful/{ => v3}/bench_test.sh | 0 .../emicklei/go-restful/{ => v3}/compress.go | 6 +- .../go-restful/{ => v3}/compressor_cache.go | 0 .../go-restful/{ => v3}/compressor_pools.go | 0 .../go-restful/{ => v3}/compressors.go | 0 .../emicklei/go-restful/{ => v3}/constants.go | 0 .../emicklei/go-restful/{ => v3}/container.go | 15 +- .../go-restful/{ => v3}/cors_filter.go | 67 +- .../emicklei/go-restful/{ => v3}/coverage.sh | 0 .../emicklei/go-restful/{ => v3}/curly.go | 0 .../go-restful/{ => v3}/curly_route.go | 0 .../go-restful/{ => v3}/custom_verb.go | 0 .../emicklei/go-restful/{ => v3}/doc.go | 6 +- .../go-restful/{ => v3}/entity_accessors.go | 0 .../emicklei/go-restful/v3/extensions.go | 21 + .../emicklei/go-restful/{ => v3}/filter.go | 8 +- .../emicklei/go-restful/v3/filter_adapter.go | 21 + .../emicklei/go-restful/{ => v3}/json.go | 0 .../emicklei/go-restful/{ => v3}/jsoniter.go | 0 .../emicklei/go-restful/{ => v3}/jsr311.go | 10 + .../emicklei/go-restful/{ => v3}/log/log.go | 0 .../emicklei/go-restful/{ => v3}/logger.go | 2 +- .../emicklei/go-restful/{ => v3}/mime.go | 0 .../go-restful/{ => v3}/options_filter.go | 0 .../emicklei/go-restful/{ => v3}/parameter.go | 109 +- .../go-restful/{ => v3}/path_expression.go | 0 .../go-restful/{ => v3}/path_processor.go | 0 .../emicklei/go-restful/{ => v3}/request.go | 24 +- .../emicklei/go-restful/{ => v3}/response.go | 2 +- .../emicklei/go-restful/{ => v3}/route.go | 7 +- .../go-restful/{ => v3}/route_builder.go | 16 +- .../emicklei/go-restful/v3/route_reader.go | 66 + .../emicklei/go-restful/{ => v3}/router.go | 0 .../go-restful/{ => v3}/service_error.go | 0 .../go-restful/{ => v3}/web_service.go | 44 +- .../{ => v3}/web_service_container.go | 0 vendor/github.com/go-logr/logr/funcr/funcr.go | 787 +++++ .../go-openapi/jsonpointer/.travis.yml | 15 - .../go-openapi/jsonreference/.golangci.yml | 13 +- .../go-openapi/jsonreference/.travis.yml | 24 - .../jsonreference/internal/normalize_url.go | 64 + .../go-openapi/jsonreference/reference.go | 6 +- .../github.com/go-openapi/swag/.golangci.yml | 4 + vendor/github.com/go-openapi/swag/doc.go | 15 +- vendor/github.com/go-openapi/swag/loading.go | 11 +- vendor/github.com/go-openapi/swag/util.go | 17 +- vendor/github.com/go-openapi/swag/yaml.go | 252 +- .../github.com/google/go-cmp/cmp/compare.go | 83 +- .../google/go-cmp/cmp/export_panic.go | 1 + .../google/go-cmp/cmp/export_unsafe.go | 1 + .../go-cmp/cmp/internal/diff/debug_disable.go | 1 + .../go-cmp/cmp/internal/diff/debug_enable.go | 1 + .../google/go-cmp/cmp/internal/diff/diff.go | 44 +- .../cmp/internal/flags/toolchain_legacy.go | 10 - 
.../cmp/internal/flags/toolchain_recent.go | 10 - .../google/go-cmp/cmp/internal/value/name.go | 7 + .../cmp/internal/value/pointer_purego.go | 1 + .../cmp/internal/value/pointer_unsafe.go | 1 + .../google/go-cmp/cmp/internal/value/zero.go | 48 - .../github.com/google/go-cmp/cmp/options.go | 10 +- vendor/github.com/google/go-cmp/cmp/path.go | 22 +- .../google/go-cmp/cmp/report_compare.go | 13 +- .../google/go-cmp/cmp/report_reflect.go | 24 +- .../google/go-cmp/cmp/report_slices.go | 31 +- .../google/go-cmp/cmp/report_text.go | 1 + vendor/github.com/google/gofuzz/.travis.yml | 11 +- .../github.com/google/gofuzz/CONTRIBUTING.md | 2 +- vendor/github.com/google/gofuzz/README.md | 18 + .../google/gofuzz/bytesource/bytesource.go | 81 + vendor/github.com/google/gofuzz/fuzz.go | 137 +- vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 181 +- .../github.com/onsi/ginkgo/v2/CONTRIBUTING.md | 2 +- vendor/github.com/onsi/ginkgo/v2/README.md | 4 +- vendor/github.com/onsi/ginkgo/v2/RELEASING.md | 8 +- .../onsi/ginkgo/v2/config/deprecated.go | 8 +- vendor/github.com/onsi/ginkgo/v2/core_dsl.go | 156 +- .../onsi/ginkgo/v2/decorator_dsl.go | 57 +- .../onsi/ginkgo/v2/deprecated_dsl.go | 2 +- .../onsi/ginkgo/v2/internal/group.go | 276 +- .../interrupt_handler/interrupt_handler.go | 180 +- .../onsi/ginkgo/v2/internal/node.go | 449 ++- .../onsi/ginkgo/v2/internal/ordering.go | 2 +- .../ginkgo/v2/internal/output_interceptor.go | 6 +- .../v2/internal/output_interceptor_unix.go | 2 +- .../parallel_support/client_server.go | 1 + .../internal/parallel_support/http_client.go | 4 + .../internal/parallel_support/http_server.go | 9 + .../internal/parallel_support/rpc_client.go | 4 + .../parallel_support/server_handler.go | 7 + .../ginkgo/v2/internal/progress_report.go | 291 ++ .../ginkgo/v2/internal/progress_report_bsd.go | 11 + .../v2/internal/progress_report_unix.go | 11 + .../ginkgo/v2/internal/progress_report_win.go | 8 + .../onsi/ginkgo/v2/internal/spec.go | 16 + .../onsi/ginkgo/v2/internal/spec_context.go | 90 + .../onsi/ginkgo/v2/internal/suite.go | 374 ++- .../onsi/ginkgo/v2/internal/writer.go | 11 +- .../ginkgo/v2/reporters/default_reporter.go | 276 +- .../onsi/ginkgo/v2/reporters/junit_report.go | 71 +- .../onsi/ginkgo/v2/reporters/reporter.go | 10 +- .../ginkgo/v2/reporters/teamcity_report.go | 28 +- .../onsi/ginkgo/v2/reporting_dsl.go | 25 +- vendor/github.com/onsi/ginkgo/v2/table_dsl.go | 67 +- .../onsi/ginkgo/v2/types/code_location.go | 3 +- .../github.com/onsi/ginkgo/v2/types/config.go | 23 +- .../ginkgo/v2/types/deprecation_support.go | 9 + .../github.com/onsi/ginkgo/v2/types/errors.go | 124 +- .../github.com/onsi/ginkgo/v2/types/flags.go | 2 +- .../onsi/ginkgo/v2/types/report_entry.go | 7 +- .../github.com/onsi/ginkgo/v2/types/types.go | 190 +- .../onsi/ginkgo/v2/types/version.go | 2 +- vendor/github.com/onsi/gomega/.gitignore | 1 + vendor/github.com/onsi/gomega/CHANGELOG.md | 133 + vendor/github.com/onsi/gomega/RELEASING.md | 19 +- .../github.com/onsi/gomega/format/format.go | 78 +- vendor/github.com/onsi/gomega/gomega_dsl.go | 238 +- .../onsi/gomega/internal/assertion.go | 13 +- .../onsi/gomega/internal/async_assertion.go | 432 ++- .../onsi/gomega/internal/duration_bundle.go | 16 +- .../github.com/onsi/gomega/internal/gomega.go | 72 +- .../gomega/internal/polling_signal_error.go | 106 + .../onsi/gomega/internal/vetoptdesc.go | 22 + vendor/github.com/onsi/gomega/matchers.go | 534 ++-- .../matchers/be_comparable_to_matcher.go | 49 + .../onsi/gomega/matchers/be_key_of_matcher.go | 45 + 
.../matchers/contain_element_matcher.go | 120 +- .../onsi/gomega/matchers/have_each_matcher.go | 65 + .../matchers/have_existing_field_matcher.go | 36 + .../onsi/gomega/matchers/have_field.go | 28 +- .../gomega/matchers/match_yaml_matcher.go | 2 +- vendor/github.com/onsi/gomega/types/types.go | 19 +- vendor/github.com/projectcalico/api/LICENSE | 176 + .../api/pkg/apis/projectcalico/v3/BUILD | 27 + .../pkg/apis/projectcalico/v3/bgpconfig.go | 160 + .../pkg/apis/projectcalico/v3/bgpfilter.go | 106 + .../api/pkg/apis/projectcalico/v3/bgppeer.go | 141 + .../apis/projectcalico/v3/blockaffinity.go | 94 + .../pkg/apis/projectcalico/v3/clusterinfo.go | 72 + .../pkg/apis/projectcalico/v3/constants.go | 52 + .../pkg/apis/projectcalico/v3/conversion.go | 183 ++ .../api/pkg/apis/projectcalico/v3/doc.go | 9 + .../pkg/apis/projectcalico/v3/felixconfig.go | 557 ++++ .../projectcalico/v3/globalnetworkpolicy.go | 129 + .../apis/projectcalico/v3/globalnetworkset.go | 63 + .../pkg/apis/projectcalico/v3/hostendpoint.go | 102 + .../pkg/apis/projectcalico/v3/ipamconfig.go | 68 + .../api/pkg/apis/projectcalico/v3/ippool.go | 147 + .../apis/projectcalico/v3/ipreservation.go | 68 + .../projectcalico/v3/kubecontrollersconfig.go | 162 + .../apis/projectcalico/v3/networkpolicy.go | 114 + .../pkg/apis/projectcalico/v3/networkset.go | 60 + .../pkg/apis/projectcalico/v3/nodestatus.go | 242 ++ .../apis/projectcalico/v3/policy_common.go | 214 ++ .../api/pkg/apis/projectcalico/v3/profile.go | 71 + .../api/pkg/apis/projectcalico/v3/register.go | 79 + .../projectcalico/v3/zz_generated.deepcopy.go | 2844 +++++++++++++++++ .../projectcalico/v3/zz_generated.defaults.go | 19 + .../api/pkg/lib/numorstring/asnumber.go | 73 + .../api/pkg/lib/numorstring/doc.go | 22 + .../api/pkg/lib/numorstring/port.go | 154 + .../api/pkg/lib/numorstring/protocol.go | 145 + .../api/pkg/lib/numorstring/type.go | 23 + .../api/pkg/lib/numorstring/uint8orstring.go | 90 + .../protobuf/encoding/prototext/decode.go | 116 +- .../protobuf/encoding/prototext/encode.go | 39 +- .../protobuf/encoding/protowire/wire.go | 23 +- .../protobuf/internal/descfmt/stringer.go | 66 +- .../internal/encoding/defval/default.go | 78 +- .../encoding/messageset/messageset.go | 7 +- .../protobuf/internal/encoding/tag/tag.go | 96 +- .../protobuf/internal/encoding/text/decode.go | 32 +- .../internal/encoding/text/decode_number.go | 6 +- .../protobuf/internal/encoding/text/doc.go | 4 +- .../protobuf/internal/errors/is_go112.go | 1 + .../protobuf/internal/errors/is_go113.go | 1 + .../protobuf/internal/filedesc/build.go | 19 +- .../protobuf/internal/filedesc/desc.go | 380 +-- .../protobuf/internal/filedesc/desc_init.go | 36 +- .../protobuf/internal/filedesc/desc_lazy.go | 80 +- .../protobuf/internal/filedesc/desc_list.go | 167 +- .../protobuf/internal/filedesc/placeholder.go | 136 +- .../protobuf/internal/filetype/build.go | 87 +- .../internal/flags/proto_legacy_disable.go | 1 + .../internal/flags/proto_legacy_enable.go | 1 + .../protobuf/internal/impl/api_export.go | 42 +- .../protobuf/internal/impl/checkinit.go | 12 +- .../protobuf/internal/impl/codec_extension.go | 36 +- .../protobuf/internal/impl/codec_field.go | 90 +- .../protobuf/internal/impl/codec_map.go | 20 +- .../protobuf/internal/impl/codec_map_go111.go | 1 + .../protobuf/internal/impl/codec_map_go112.go | 1 + .../protobuf/internal/impl/codec_message.go | 30 +- .../protobuf/internal/impl/codec_reflect.go | 1 + .../protobuf/internal/impl/codec_tables.go | 290 +- .../protobuf/internal/impl/codec_unsafe.go | 1 + 
.../protobuf/internal/impl/convert.go | 228 +- .../protobuf/internal/impl/convert_list.go | 42 +- .../protobuf/internal/impl/convert_map.go | 32 +- .../protobuf/internal/impl/decode.go | 29 +- .../protobuf/internal/impl/enum.go | 10 +- .../protobuf/internal/impl/extension.go | 26 +- .../protobuf/internal/impl/legacy_enum.go | 57 +- .../protobuf/internal/impl/legacy_export.go | 18 +- .../internal/impl/legacy_extension.go | 100 +- .../protobuf/internal/impl/legacy_message.go | 122 +- .../protobuf/internal/impl/merge.go | 32 +- .../protobuf/internal/impl/message.go | 41 +- .../protobuf/internal/impl/message_reflect.go | 74 +- .../internal/impl/message_reflect_field.go | 118 +- .../protobuf/internal/impl/pointer_reflect.go | 1 + .../protobuf/internal/impl/pointer_unsafe.go | 1 + .../protobuf/internal/impl/validate.go | 50 +- .../protobuf/internal/impl/weak.go | 16 +- .../protobuf/internal/order/order.go | 16 +- .../protobuf/internal/order/range.go | 22 +- .../protobuf/internal/strs/strings_pure.go | 1 + .../protobuf/internal/strs/strings_unsafe.go | 7 +- .../protobuf/internal/version/version.go | 54 +- .../protobuf/proto/decode.go | 20 +- .../google.golang.org/protobuf/proto/doc.go | 21 +- .../protobuf/proto/encode.go | 5 +- .../google.golang.org/protobuf/proto/equal.go | 50 +- .../protobuf/proto/proto_methods.go | 1 + .../protobuf/proto/proto_reflect.go | 1 + .../reflect/protodesc/desc_resolve.go | 6 +- .../protobuf/reflect/protoreflect/methods.go | 1 + .../protobuf/reflect/protoreflect/proto.go | 32 +- .../protobuf/reflect/protoreflect/source.go | 1 + .../protobuf/reflect/protoreflect/type.go | 1 + .../reflect/protoreflect/value_pure.go | 1 + .../reflect/protoreflect/value_union.go | 27 + .../reflect/protoreflect/value_unsafe.go | 1 + .../reflect/protoregistry/registry.go | 2 + .../protobuf/runtime/protoiface/methods.go | 1 + .../protobuf/runtime/protoimpl/version.go | 8 +- vendor/k8s.io/klog/v2/OWNERS | 1 + vendor/k8s.io/klog/v2/README.md | 1 - vendor/k8s.io/klog/v2/contextual.go | 43 +- vendor/k8s.io/klog/v2/internal/dbg/dbg.go | 42 + .../klog/v2/internal/serialize/keyvalues.go | 120 +- vendor/k8s.io/klog/v2/k8s_references.go | 64 + vendor/k8s.io/klog/v2/klog.go | 395 ++- vendor/k8s.io/klog/v2/klogr.go | 8 +- .../k8s.io/kube-openapi/pkg/common/common.go | 50 +- .../kube-openapi/pkg/handler3/handler.go | 36 +- .../k8s.io/kube-openapi/pkg/internal/flags.go | 24 + .../pkg/internal/serialization.go | 65 + .../go-json-experiment/json}/AUTHORS | 0 .../go-json-experiment/json}/CONTRIBUTORS | 0 .../go-json-experiment/json}/LICENSE | 2 +- .../go-json-experiment/json/README.md | 321 ++ .../go-json-experiment/json/arshal.go | 513 +++ .../go-json-experiment/json/arshal_any.go | 238 ++ .../go-json-experiment/json/arshal_default.go | 1485 +++++++++ .../go-json-experiment/json/arshal_funcs.go | 387 +++ .../go-json-experiment/json/arshal_inlined.go | 213 ++ .../go-json-experiment/json/arshal_methods.go | 229 ++ .../go-json-experiment/json/arshal_time.go | 241 ++ .../go-json-experiment/json/decode.go | 1655 ++++++++++ .../go-json-experiment/json/doc.go | 182 ++ .../go-json-experiment/json/encode.go | 1170 +++++++ .../go-json-experiment/json/errors.go | 183 ++ .../go-json-experiment/json/fields.go | 509 +++ .../go-json-experiment/json/fold.go | 56 + .../go-json-experiment/json/intern.go | 86 + .../go-json-experiment/json/pools.go | 182 ++ .../go-json-experiment/json/state.go | 747 +++++ .../go-json-experiment/json/token.go | 522 +++ .../go-json-experiment/json/value.go | 381 +++ 
.../kube-openapi/pkg/schemaconv/openapi.go | 260 ++ .../pkg/schemaconv/proto_models.go | 178 ++ .../k8s.io/kube-openapi/pkg/schemaconv/smd.go | 292 +- .../k8s.io/kube-openapi/pkg/spec3/encoding.go | 22 +- .../k8s.io/kube-openapi/pkg/spec3/example.go | 25 +- .../pkg/spec3/external_documentation.go | 21 +- vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go | 254 ++ .../k8s.io/kube-openapi/pkg/spec3/header.go | 21 + .../kube-openapi/pkg/spec3/media_type.go | 22 +- .../kube-openapi/pkg/spec3/operation.go | 22 +- .../kube-openapi/pkg/spec3/parameter.go | 22 + vendor/k8s.io/kube-openapi/pkg/spec3/path.go | 82 +- .../kube-openapi/pkg/spec3/request_body.go | 23 +- .../k8s.io/kube-openapi/pkg/spec3/response.go | 121 +- .../pkg/spec3/security_requirement.go | 56 - .../kube-openapi/pkg/spec3/security_scheme.go | 2 +- .../k8s.io/kube-openapi/pkg/spec3/server.go | 41 +- vendor/k8s.io/kube-openapi/pkg/spec3/spec.go | 13 + .../pkg/util/proto/document_v3.go | 4 +- .../kube-openapi/pkg/validation/spec/fuzz.go | 502 +++ .../pkg/validation/spec/gnostic.go | 1517 +++++++++ .../pkg/validation/spec/header.go | 43 + .../kube-openapi/pkg/validation/spec/info.go | 45 + .../kube-openapi/pkg/validation/spec/items.go | 71 + .../pkg/validation/spec/operation.go | 50 + .../pkg/validation/spec/parameter.go | 93 +- .../pkg/validation/spec/path_item.go | 39 + .../kube-openapi/pkg/validation/spec/paths.go | 79 + .../kube-openapi/pkg/validation/spec/ref.go | 18 +- .../pkg/validation/spec/response.go | 55 +- .../pkg/validation/spec/responses.go | 106 +- .../pkg/validation/spec/schema.go | 118 + .../pkg/validation/spec/security_scheme.go | 28 + .../pkg/validation/spec/swagger.go | 171 +- .../kube-openapi/pkg/validation/spec/tag.go | 32 + vendor/k8s.io/utils/pointer/pointer.go | 138 + vendor/k8s.io/utils/trace/trace.go | 30 +- vendor/modules.txt | 77 +- .../internal/golang/encoding/json/decode.go | 55 +- .../internal/golang/encoding/json/encode.go | 71 +- .../internal/golang/encoding/json/fold.go | 5 +- .../internal/golang/encoding/json/fuzz.go | 9 +- .../golang/encoding/json/kubernetes_patch.go | 49 +- .../internal/golang/encoding/json/scanner.go | 4 +- .../internal/golang/encoding/json/stream.go | 9 +- .../internal/golang/encoding/json/tags.go | 16 +- vendor/sigs.k8s.io/json/json.go | 39 +- .../v4/schema/elements.go | 131 +- .../structured-merge-diff/v4/schema/equals.go | 3 + .../v4/schema/schemaschema.go | 3 + .../structured-merge-diff/v4/typed/helpers.go | 6 +- .../structured-merge-diff/v4/typed/merge.go | 7 +- .../v4/typed/reconcile_schema.go | 2 +- .../structured-merge-diff/v4/typed/typed.go | 13 +- 343 files changed, 28564 insertions(+), 4739 deletions(-) delete mode 100644 vendor/github.com/PuerkitoBio/purell/.gitignore delete mode 100644 vendor/github.com/PuerkitoBio/purell/.travis.yml delete mode 100644 vendor/github.com/PuerkitoBio/purell/LICENSE delete mode 100644 vendor/github.com/PuerkitoBio/purell/README.md delete mode 100644 vendor/github.com/PuerkitoBio/purell/purell.go delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/.travis.yml delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/README.md delete mode 100644 vendor/github.com/PuerkitoBio/urlesc/urlesc.go delete mode 100644 vendor/github.com/emicklei/go-restful/Makefile rename vendor/github.com/emicklei/go-restful/{ => v3}/.gitignore (100%) create mode 100644 vendor/github.com/emicklei/go-restful/v3/.goconvey rename vendor/github.com/emicklei/go-restful/{ => v3}/.travis.yml (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/CHANGES.md (72%) 
rename vendor/github.com/emicklei/go-restful/{ => v3}/LICENSE (100%) create mode 100644 vendor/github.com/emicklei/go-restful/v3/Makefile rename vendor/github.com/emicklei/go-restful/{ => v3}/README.md (89%) create mode 100644 vendor/github.com/emicklei/go-restful/v3/SECURITY.md rename vendor/github.com/emicklei/go-restful/{ => v3}/Srcfile (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/bench_test.sh (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/compress.go (92%) rename vendor/github.com/emicklei/go-restful/{ => v3}/compressor_cache.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/compressor_pools.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/compressors.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/constants.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/container.go (97%) rename vendor/github.com/emicklei/go-restful/{ => v3}/cors_filter.go (81%) rename vendor/github.com/emicklei/go-restful/{ => v3}/coverage.sh (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/curly.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/curly_route.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/custom_verb.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/doc.go (95%) rename vendor/github.com/emicklei/go-restful/{ => v3}/entity_accessors.go (100%) create mode 100644 vendor/github.com/emicklei/go-restful/v3/extensions.go rename vendor/github.com/emicklei/go-restful/{ => v3}/filter.go (79%) create mode 100644 vendor/github.com/emicklei/go-restful/v3/filter_adapter.go rename vendor/github.com/emicklei/go-restful/{ => v3}/json.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/jsoniter.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/jsr311.go (96%) rename vendor/github.com/emicklei/go-restful/{ => v3}/log/log.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/logger.go (95%) rename vendor/github.com/emicklei/go-restful/{ => v3}/mime.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/options_filter.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/parameter.go (56%) rename vendor/github.com/emicklei/go-restful/{ => v3}/path_expression.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/path_processor.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/request.go (85%) rename vendor/github.com/emicklei/go-restful/{ => v3}/response.go (99%) rename vendor/github.com/emicklei/go-restful/{ => v3}/route.go (97%) rename vendor/github.com/emicklei/go-restful/{ => v3}/route_builder.go (96%) create mode 100644 vendor/github.com/emicklei/go-restful/v3/route_reader.go rename vendor/github.com/emicklei/go-restful/{ => v3}/router.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/service_error.go (100%) rename vendor/github.com/emicklei/go-restful/{ => v3}/web_service.go (91%) rename vendor/github.com/emicklei/go-restful/{ => v3}/web_service_container.go (100%) create mode 100644 vendor/github.com/go-logr/logr/funcr/funcr.go delete mode 100644 vendor/github.com/go-openapi/jsonpointer/.travis.yml delete mode 100644 vendor/github.com/go-openapi/jsonreference/.travis.yml create mode 100644 vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go delete mode 100644 
vendor/github.com/google/go-cmp/cmp/internal/value/zero.go create mode 100644 vendor/github.com/google/gofuzz/bytesource/bytesource.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go create mode 100644 vendor/github.com/onsi/gomega/internal/polling_signal_error.go create mode 100644 vendor/github.com/onsi/gomega/internal/vetoptdesc.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_each_matcher.go create mode 100644 vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go create mode 100644 vendor/github.com/projectcalico/api/LICENSE create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/BUILD create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/bgpconfig.go create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/bgpfilter.go create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/bgppeer.go create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/blockaffinity.go create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/clusterinfo.go create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/constants.go create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/conversion.go create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/doc.go create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/felixconfig.go create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/globalnetworkpolicy.go create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/globalnetworkset.go create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/hostendpoint.go create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/ipamconfig.go create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/ippool.go create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/ipreservation.go create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/kubecontrollersconfig.go create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/networkpolicy.go create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/networkset.go create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/nodestatus.go create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/policy_common.go create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/profile.go create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/register.go create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/zz_generated.deepcopy.go create mode 100644 vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/zz_generated.defaults.go create mode 100644 
vendor/github.com/projectcalico/api/pkg/lib/numorstring/asnumber.go create mode 100644 vendor/github.com/projectcalico/api/pkg/lib/numorstring/doc.go create mode 100644 vendor/github.com/projectcalico/api/pkg/lib/numorstring/port.go create mode 100644 vendor/github.com/projectcalico/api/pkg/lib/numorstring/protocol.go create mode 100644 vendor/github.com/projectcalico/api/pkg/lib/numorstring/type.go create mode 100644 vendor/github.com/projectcalico/api/pkg/lib/numorstring/uint8orstring.go create mode 100644 vendor/k8s.io/klog/v2/internal/dbg/dbg.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/flags.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/serialization.go rename vendor/{google.golang.org/protobuf => k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json}/AUTHORS (100%) rename vendor/{google.golang.org/protobuf => k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json}/CONTRIBUTORS (100%) rename vendor/{github.com/PuerkitoBio/urlesc => k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json}/LICENSE (96%) create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/README.md create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_funcs.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/errors.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fields.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fold.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/intern.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go delete mode 100644 vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go create mode 100644 
vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go diff --git a/go.mod b/go.mod index 54a1ff9c6..1d852087b 100644 --- a/go.mod +++ b/go.mod @@ -7,13 +7,14 @@ require ( github.com/gorilla/mux v1.8.0 github.com/hashicorp/go-version v1.6.0 github.com/jackc/pgx/v5 v5.4.3 - github.com/onsi/ginkgo/v2 v2.0.0 - github.com/onsi/gomega v1.18.1 + github.com/onsi/ginkgo/v2 v2.4.0 + github.com/onsi/gomega v1.23.0 + github.com/projectcalico/api v0.0.0-20230602153125-fb7148692637 go.uber.org/zap v1.19.1 - k8s.io/api v0.24.2 - k8s.io/apimachinery v0.24.2 - k8s.io/client-go v0.24.2 - k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 + k8s.io/api v0.26.3 + k8s.io/apimachinery v0.26.3 + k8s.io/client-go v0.26.3 + k8s.io/utils v0.0.0-20221107191617-1a15be271d1d kubevirt.io/api v0.59.0 kubevirt.io/containerized-data-importer-api v1.56.0 sigs.k8s.io/controller-runtime v0.12.2 @@ -27,25 +28,23 @@ require ( github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful v2.15.0+incompatible // indirect + github.com/emicklei/go-restful/v3 v3.9.0 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/go-logr/zapr v1.2.0 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.6 // indirect - github.com/go-openapi/swag v0.21.1 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/swag v0.22.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect - github.com/google/go-cmp v0.5.6 // indirect - github.com/google/gofuzz v1.1.0 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.1.2 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect @@ -71,23 +70,29 @@ require ( go.uber.org/multierr v1.6.0 // indirect golang.org/x/crypto v0.9.0 // indirect golang.org/x/net v0.10.0 // indirect - golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect golang.org/x/sys v0.8.0 // indirect golang.org/x/term v0.8.0 // indirect golang.org/x/text v0.9.0 // indirect golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.27.1 // indirect + google.golang.org/protobuf v1.28.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.24.2 // indirect k8s.io/component-base v0.24.2 // indirect - k8s.io/klog/v2 v2.60.1 // indirect - k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect + k8s.io/klog/v2 v2.80.1 // indirect + 
k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d // indirect kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 // indirect - sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) + +replace ( + k8s.io/api => k8s.io/api v0.24.2 + k8s.io/apimachinery => k8s.io/apimachinery v0.24.2 + k8s.io/client-go => k8s.io/client-go v0.24.2 +) diff --git a/go.sum b/go.sum index a1b54e9ff..eca83184c 100644 --- a/go.sum +++ b/go.sum @@ -58,9 +58,7 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -128,8 +126,9 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.15.0+incompatible h1:8KpYO/Xl/ZudZs5RNOEhWMBY4hmzlZhhRd9cu+jrZP4= github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= +github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -171,16 +170,19 @@ github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.6 
h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs= github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= +github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= +github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -241,11 +243,12 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -341,6 +344,7 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -393,14 +397,16 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108 github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.0.0 h1:CcuG/HvWNkkaqCUpJifQY8z7qEMBJya6aLPx6ftGyjQ= github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= +github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= +github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= +github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= github.com/openshift/api v0.0.0-20211217221424-8779abfbd571 h1:+ShYlGoPriGahTTFTjQ0RtNXW0srxDodk2STdc238Rk= github.com/openshift/api v0.0.0-20211217221424-8779abfbd571/go.mod h1:F/eU6jgr6Q2VhMu1mSpMmygxAELd7+BUxs3NHZ25jV4= github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= @@ -421,6 +427,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/projectcalico/api v0.0.0-20230602153125-fb7148692637 h1:F48and+6vKJsRMl95Y/XKVik0Kwhos8YShTH9Fsdqlw= +github.com/projectcalico/api v0.0.0-20230602153125-fb7148692637/go.mod h1:d3yVTVhVHDawgeKrru/ZZD8QLEtiKQciUaAwnua47Qg= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -483,13 +491,18 @@ github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5q github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -663,8 +676,9 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -833,7 +847,6 @@ golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= @@ -946,14 +959,16 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf 
v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= @@ -989,14 +1004,10 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= -k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= k8s.io/api v0.24.2 h1:g518dPU/L7VRLxWfcadQn2OnsiGWVOadTLpdnqgY2OI= k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg= k8s.io/apiextensions-apiserver v0.24.2 h1:/4NEQHKlEz1MlaK/wHT5KMKC9UKYz6NZz6JE6ov4G6k= k8s.io/apiextensions-apiserver v0.24.2/go.mod h1:e5t2GMFVngUEHUd0wuCJzw8YDwZoqZfJiGOW6mm2hLQ= -k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc= -k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= k8s.io/apimachinery v0.24.2 h1:5QlH9SL2C8KMcrNJPor+LbXVTaZRReml7svPEh4OKDM= k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= k8s.io/apiserver v0.24.2/go.mod h1:pSuKzr3zV+L+MWqsEo0kHHYwCo77AT5qXbFXP2jbvFI= @@ -1013,17 +1024,18 @@ k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= +k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= +k8s.io/kube-openapi 
v0.0.0-20230303024457-afdc3dddf62d h1:VcFq5n7wCJB2FQMCIHfC+f+jNcGgNMar1uKd6rVlifU= +k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs= +k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= kubevirt.io/api v0.59.0 h1:UDsJWklzd0x/w3EQjc48jafZc4p4vVxKUpmBhg2nVRk= kubevirt.io/api v0.59.0/go.mod h1:zts/6mioR8vGgvYmQ17Cb9XsUR9e/WjJcdokmrE38wY= kubevirt.io/containerized-data-importer-api v1.56.0 h1:Ehc6CbT3mG2uz+9s3t2N4HnpdK5GfQMt2DCCXCz2sDI= @@ -1036,13 +1048,14 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw= sigs.k8s.io/controller-runtime v0.12.2 h1:nqV02cvhbAj7tbt21bpPpTByrXGn2INHRsi39lXy9sE= sigs.k8s.io/controller-runtime v0.12.2/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= +sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/vendor/github.com/PuerkitoBio/purell/.gitignore b/vendor/github.com/PuerkitoBio/purell/.gitignore deleted file mode 100644 index 748e4c807..000000000 --- a/vendor/github.com/PuerkitoBio/purell/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -*.sublime-* -.DS_Store -*.swp -*.swo -tags diff --git a/vendor/github.com/PuerkitoBio/purell/.travis.yml b/vendor/github.com/PuerkitoBio/purell/.travis.yml deleted file mode 100644 index cf31e6af6..000000000 --- a/vendor/github.com/PuerkitoBio/purell/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - "1.10.x" - - "1.11.x" - - tip 
diff --git a/vendor/github.com/PuerkitoBio/purell/LICENSE b/vendor/github.com/PuerkitoBio/purell/LICENSE deleted file mode 100644 index 4b9986dea..000000000 --- a/vendor/github.com/PuerkitoBio/purell/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ -Copyright (c) 2012, Martin Angers -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/PuerkitoBio/purell/README.md b/vendor/github.com/PuerkitoBio/purell/README.md deleted file mode 100644 index 07de0c498..000000000 --- a/vendor/github.com/PuerkitoBio/purell/README.md +++ /dev/null @@ -1,188 +0,0 @@ -# Purell - -Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know... - -Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc]. - -[![build status](https://travis-ci.org/PuerkitoBio/purell.svg?branch=master)](http://travis-ci.org/PuerkitoBio/purell) - -## Install - -`go get github.com/PuerkitoBio/purell` - -## Changelog - -* **v1.1.1** : Fix failing test due to Go1.12 changes (thanks to @ianlancetaylor). -* **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121). -* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich). -* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]). -* **v0.2.0** : Add benchmarks, Attempt IDN support. -* **v0.1.0** : Initial release. 
- -## Examples - -From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."): - -```go -package purell - -import ( - "fmt" - "net/url" -) - -func ExampleNormalizeURLString() { - if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/", - FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil { - panic(err) - } else { - fmt.Print(normalized) - } - // Output: http://somewebsite.com:80/Amazing%3F/url/ -} - -func ExampleMustNormalizeURLString() { - normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/", - FlagsUnsafeGreedy) - fmt.Print(normalized) - - // Output: http://somewebsite.com/Amazing%FA/url -} - -func ExampleNormalizeURL() { - if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil { - panic(err) - } else { - normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment) - fmt.Print(normalized) - } - - // Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0 -} -``` - -## API - -As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags: - -```go -const ( - // Safe normalizations - FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 - FlagLowercaseHost // http://HOST -> http://host - FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF - FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA - FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ - FlagRemoveDefaultPort // http://host:80 -> http://host - FlagRemoveEmptyQuerySeparator // http://host/path? 
-> http://host/path - - // Usually safe normalizations - FlagRemoveTrailingSlash // http://host/path/ -> http://host/path - FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) - FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c - - // Unsafe normalizations - FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ - FlagRemoveFragment // http://host/path#fragment -> http://host/path - FlagForceHTTP // https://host -> http://host - FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b - FlagRemoveWWW // http://www.host/ -> http://host/ - FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) - FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 - - // Normalizations not in the wikipedia article, required to cover tests cases - // submitted by jehiah - FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 - FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 - FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 - FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path - FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path - - // Convenience set of safe normalizations - FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator - - // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, - // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". - - // Convenience set of usually safe normalizations (includes FlagsSafe) - FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments - FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments - - // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) - FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery - FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery - - // Convenience set of all available flags - FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator - FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator -) -``` - -For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set. - -The [full godoc reference is available on gopkgdoc][godoc]. 
- -Some things to note: - -* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it. - -* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*): - - %24 -> $ - - %26 -> & - - %2B-%3B -> +,-./0123456789:; - - %3D -> = - - %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ - - %5F -> _ - - %61-%7A -> abcdefghijklmnopqrstuvwxyz - - %7E -> ~ - - -* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization). - -* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell. - -* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object. - -### Safe vs Usually Safe vs Unsafe - -Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between. - -Consider the following URL: - -`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` - -Normalizing with the `FlagsSafe` gives: - -`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` - -With the `FlagsUsuallySafeGreedy`: - -`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid` - -And with `FlagsUnsafeGreedy`: - -`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3` - -## TODOs - -* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`. - -## Thanks / Contributions - -@rogpeppe -@jehiah -@opennota -@pchristopher1275 -@zenovich -@beeker1121 - -## License - -The [BSD 3-Clause license][bsd]. - -[bsd]: http://opensource.org/licenses/BSD-3-Clause -[wiki]: http://en.wikipedia.org/wiki/URL_normalization -[rfc]: http://tools.ietf.org/html/rfc3986#section-6 -[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell -[pr5]: https://github.com/PuerkitoBio/purell/pull/5 -[iss7]: https://github.com/PuerkitoBio/purell/issues/7 diff --git a/vendor/github.com/PuerkitoBio/purell/purell.go b/vendor/github.com/PuerkitoBio/purell/purell.go deleted file mode 100644 index 6d0fc190a..000000000 --- a/vendor/github.com/PuerkitoBio/purell/purell.go +++ /dev/null @@ -1,379 +0,0 @@ -/* -Package purell offers URL normalization as described on the wikipedia page: -http://en.wikipedia.org/wiki/URL_normalization -*/ -package purell - -import ( - "bytes" - "fmt" - "net/url" - "regexp" - "sort" - "strconv" - "strings" - - "github.com/PuerkitoBio/urlesc" - "golang.org/x/net/idna" - "golang.org/x/text/unicode/norm" - "golang.org/x/text/width" -) - -// A set of normalization flags determines how a URL will -// be normalized. 
-type NormalizationFlags uint - -const ( - // Safe normalizations - FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 - FlagLowercaseHost // http://HOST -> http://host - FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF - FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA - FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ - FlagRemoveDefaultPort // http://host:80 -> http://host - FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path - - // Usually safe normalizations - FlagRemoveTrailingSlash // http://host/path/ -> http://host/path - FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) - FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c - - // Unsafe normalizations - FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ - FlagRemoveFragment // http://host/path#fragment -> http://host/path - FlagForceHTTP // https://host -> http://host - FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b - FlagRemoveWWW // http://www.host/ -> http://host/ - FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) - FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 - - // Normalizations not in the wikipedia article, required to cover tests cases - // submitted by jehiah - FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 - FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 - FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 - FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path - FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path - - // Convenience set of safe normalizations - FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator - - // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, - // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". 
- - // Convenience set of usually safe normalizations (includes FlagsSafe) - FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments - FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments - - // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) - FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery - FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery - - // Convenience set of all available flags - FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator - FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator -) - -const ( - defaultHttpPort = ":80" - defaultHttpsPort = ":443" -) - -// Regular expressions used by the normalizations -var rxPort = regexp.MustCompile(`(:\d+)/?$`) -var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`) -var rxDupSlashes = regexp.MustCompile(`/{2,}`) -var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`) -var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`) -var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`) -var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`) -var rxEmptyPort = regexp.MustCompile(`:+$`) - -// Map of flags to implementation function. -// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically -// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator. - -// Since maps have undefined traversing order, make a slice of ordered keys -var flagsOrder = []NormalizationFlags{ - FlagLowercaseScheme, - FlagLowercaseHost, - FlagRemoveDefaultPort, - FlagRemoveDirectoryIndex, - FlagRemoveDotSegments, - FlagRemoveFragment, - FlagForceHTTP, // Must be after remove default port (because https=443/http=80) - FlagRemoveDuplicateSlashes, - FlagRemoveWWW, - FlagAddWWW, - FlagSortQuery, - FlagDecodeDWORDHost, - FlagDecodeOctalHost, - FlagDecodeHexHost, - FlagRemoveUnnecessaryHostDots, - FlagRemoveEmptyPortSeparator, - FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last - FlagAddTrailingSlash, -} - -// ... 
and then the map, where order is unimportant -var flags = map[NormalizationFlags]func(*url.URL){ - FlagLowercaseScheme: lowercaseScheme, - FlagLowercaseHost: lowercaseHost, - FlagRemoveDefaultPort: removeDefaultPort, - FlagRemoveDirectoryIndex: removeDirectoryIndex, - FlagRemoveDotSegments: removeDotSegments, - FlagRemoveFragment: removeFragment, - FlagForceHTTP: forceHTTP, - FlagRemoveDuplicateSlashes: removeDuplicateSlashes, - FlagRemoveWWW: removeWWW, - FlagAddWWW: addWWW, - FlagSortQuery: sortQuery, - FlagDecodeDWORDHost: decodeDWORDHost, - FlagDecodeOctalHost: decodeOctalHost, - FlagDecodeHexHost: decodeHexHost, - FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots, - FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator, - FlagRemoveTrailingSlash: removeTrailingSlash, - FlagAddTrailingSlash: addTrailingSlash, -} - -// MustNormalizeURLString returns the normalized string, and panics if an error occurs. -// It takes an URL string as input, as well as the normalization flags. -func MustNormalizeURLString(u string, f NormalizationFlags) string { - result, e := NormalizeURLString(u, f) - if e != nil { - panic(e) - } - return result -} - -// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object. -// It takes an URL string as input, as well as the normalization flags. -func NormalizeURLString(u string, f NormalizationFlags) (string, error) { - parsed, err := url.Parse(u) - if err != nil { - return "", err - } - - if f&FlagLowercaseHost == FlagLowercaseHost { - parsed.Host = strings.ToLower(parsed.Host) - } - - // The idna package doesn't fully conform to RFC 5895 - // (https://tools.ietf.org/html/rfc5895), so we do it here. - // Taken from Go 1.8 cycle source, courtesy of bradfitz. - // TODO: Remove when (if?) idna package conforms to RFC 5895. - parsed.Host = width.Fold.String(parsed.Host) - parsed.Host = norm.NFC.String(parsed.Host) - if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil { - return "", err - } - - return NormalizeURL(parsed, f), nil -} - -// NormalizeURL returns the normalized string. -// It takes a parsed URL object as input, as well as the normalization flags. -func NormalizeURL(u *url.URL, f NormalizationFlags) string { - for _, k := range flagsOrder { - if f&k == k { - flags[k](u) - } - } - return urlesc.Escape(u) -} - -func lowercaseScheme(u *url.URL) { - if len(u.Scheme) > 0 { - u.Scheme = strings.ToLower(u.Scheme) - } -} - -func lowercaseHost(u *url.URL) { - if len(u.Host) > 0 { - u.Host = strings.ToLower(u.Host) - } -} - -func removeDefaultPort(u *url.URL) { - if len(u.Host) > 0 { - scheme := strings.ToLower(u.Scheme) - u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { - if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) { - return "" - } - return val - }) - } -} - -func removeTrailingSlash(u *url.URL) { - if l := len(u.Path); l > 0 { - if strings.HasSuffix(u.Path, "/") { - u.Path = u.Path[:l-1] - } - } else if l = len(u.Host); l > 0 { - if strings.HasSuffix(u.Host, "/") { - u.Host = u.Host[:l-1] - } - } -} - -func addTrailingSlash(u *url.URL) { - if l := len(u.Path); l > 0 { - if !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - } - } else if l = len(u.Host); l > 0 { - if !strings.HasSuffix(u.Host, "/") { - u.Host += "/" - } - } -} - -func removeDotSegments(u *url.URL) { - if len(u.Path) > 0 { - var dotFree []string - var lastIsDot bool - - sections := strings.Split(u.Path, "/") - for _, s := range sections { - if s == ".." 
{ - if len(dotFree) > 0 { - dotFree = dotFree[:len(dotFree)-1] - } - } else if s != "." { - dotFree = append(dotFree, s) - } - lastIsDot = (s == "." || s == "..") - } - // Special case if host does not end with / and new path does not begin with / - u.Path = strings.Join(dotFree, "/") - if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") { - u.Path = "/" + u.Path - } - // Special case if the last segment was a dot, make sure the path ends with a slash - if lastIsDot && !strings.HasSuffix(u.Path, "/") { - u.Path += "/" - } - } -} - -func removeDirectoryIndex(u *url.URL) { - if len(u.Path) > 0 { - u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1") - } -} - -func removeFragment(u *url.URL) { - u.Fragment = "" -} - -func forceHTTP(u *url.URL) { - if strings.ToLower(u.Scheme) == "https" { - u.Scheme = "http" - } -} - -func removeDuplicateSlashes(u *url.URL) { - if len(u.Path) > 0 { - u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") - } -} - -func removeWWW(u *url.URL) { - if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") { - u.Host = u.Host[4:] - } -} - -func addWWW(u *url.URL) { - if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") { - u.Host = "www." + u.Host - } -} - -func sortQuery(u *url.URL) { - q := u.Query() - - if len(q) > 0 { - arKeys := make([]string, len(q)) - i := 0 - for k := range q { - arKeys[i] = k - i++ - } - sort.Strings(arKeys) - buf := new(bytes.Buffer) - for _, k := range arKeys { - sort.Strings(q[k]) - for _, v := range q[k] { - if buf.Len() > 0 { - buf.WriteRune('&') - } - buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v))) - } - } - - // Rebuild the raw query string - u.RawQuery = buf.String() - } -} - -func decodeDWORDHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 { - var parts [4]int64 - - dword, _ := strconv.ParseInt(matches[1], 10, 0) - for i, shift := range []uint{24, 16, 8, 0} { - parts[i] = dword >> shift & 0xFF - } - u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2]) - } - } -} - -func decodeOctalHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 { - var parts [4]int64 - - for i := 1; i <= 4; i++ { - parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0) - } - u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5]) - } - } -} - -func decodeHexHost(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 { - // Conversion is safe because of regex validation - parsed, _ := strconv.ParseInt(matches[1], 16, 0) - // Set host as DWORD (base 10) encoded host - u.Host = fmt.Sprintf("%d%s", parsed, matches[2]) - // The rest is the same as decoding a DWORD host - decodeDWORDHost(u) - } - } -} - -func removeUnncessaryHostDots(u *url.URL) { - if len(u.Host) > 0 { - if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 { - // Trim the leading and trailing dots - u.Host = strings.Trim(matches[1], ".") - if len(matches) > 2 { - u.Host += matches[2] - } - } - } -} - -func removeEmptyPortSeparator(u *url.URL) { - if len(u.Host) > 0 { - u.Host = rxEmptyPort.ReplaceAllString(u.Host, "") - } -} diff --git a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml b/vendor/github.com/PuerkitoBio/urlesc/.travis.yml deleted file mode 100644 index ba6b225f9..000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml +++ /dev/null @@ 
-1,15 +0,0 @@ -language: go - -go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - tip - -install: - - go build . - -script: - - go test -v diff --git a/vendor/github.com/PuerkitoBio/urlesc/README.md b/vendor/github.com/PuerkitoBio/urlesc/README.md deleted file mode 100644 index 57aff0a53..000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/README.md +++ /dev/null @@ -1,16 +0,0 @@ -urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.svg?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc) -====== - -Package urlesc implements query escaping as per RFC 3986. - -It contains some parts of the net/url package, modified so as to allow -some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)). - -## Install - - go get github.com/PuerkitoBio/urlesc - -## License - -Go license (BSD-3-Clause) - diff --git a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go deleted file mode 100644 index 1b8462459..000000000 --- a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package urlesc implements query escaping as per RFC 3986. -// It contains some parts of the net/url package, modified so as to allow -// some reserved characters incorrectly escaped by net/url. -// See https://github.com/golang/go/issues/5684 -package urlesc - -import ( - "bytes" - "net/url" - "strings" -) - -type encoding int - -const ( - encodePath encoding = 1 + iota - encodeUserPassword - encodeQueryComponent - encodeFragment -) - -// Return true if the specified character should be escaped when -// appearing in a URL string, according to RFC 3986. -func shouldEscape(c byte, mode encoding) bool { - // §2.3 Unreserved characters (alphanum) - if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' { - return false - } - - switch c { - case '-', '.', '_', '~': // §2.3 Unreserved characters (mark) - return false - - // §2.2 Reserved characters (reserved) - case ':', '/', '?', '#', '[', ']', '@', // gen-delims - '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims - // Different sections of the URL allow a few of - // the reserved characters to appear unescaped. - switch mode { - case encodePath: // §3.3 - // The RFC allows sub-delims and : @. - // '/', '[' and ']' can be used to assign meaning to individual path - // segments. This package only manipulates the path as a whole, - // so we allow those as well. That leaves only ? and # to escape. - return c == '?' || c == '#' - - case encodeUserPassword: // §3.2.1 - // The RFC allows : and sub-delims in - // userinfo. The parsing of userinfo treats ':' as special so we must escape - // all the gen-delims. - return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@' - - case encodeQueryComponent: // §3.4 - // The RFC allows / and ?. - return c != '/' && c != '?' - - case encodeFragment: // §4.1 - // The RFC text is silent but the grammar allows - // everything, so escape nothing but # - return c == '#' - } - } - - // Everything else must be escaped. - return true -} - -// QueryEscape escapes the string so it can be safely placed -// inside a URL query. 
-func QueryEscape(s string) string { - return escape(s, encodeQueryComponent) -} - -func escape(s string, mode encoding) string { - spaceCount, hexCount := 0, 0 - for i := 0; i < len(s); i++ { - c := s[i] - if shouldEscape(c, mode) { - if c == ' ' && mode == encodeQueryComponent { - spaceCount++ - } else { - hexCount++ - } - } - } - - if spaceCount == 0 && hexCount == 0 { - return s - } - - t := make([]byte, len(s)+2*hexCount) - j := 0 - for i := 0; i < len(s); i++ { - switch c := s[i]; { - case c == ' ' && mode == encodeQueryComponent: - t[j] = '+' - j++ - case shouldEscape(c, mode): - t[j] = '%' - t[j+1] = "0123456789ABCDEF"[c>>4] - t[j+2] = "0123456789ABCDEF"[c&15] - j += 3 - default: - t[j] = s[i] - j++ - } - } - return string(t) -} - -var uiReplacer = strings.NewReplacer( - "%21", "!", - "%27", "'", - "%28", "(", - "%29", ")", - "%2A", "*", -) - -// unescapeUserinfo unescapes some characters that need not to be escaped as per RFC3986. -func unescapeUserinfo(s string) string { - return uiReplacer.Replace(s) -} - -// Escape reassembles the URL into a valid URL string. -// The general form of the result is one of: -// -// scheme:opaque -// scheme://userinfo@host/path?query#fragment -// -// If u.Opaque is non-empty, String uses the first form; -// otherwise it uses the second form. -// -// In the second form, the following rules apply: -// - if u.Scheme is empty, scheme: is omitted. -// - if u.User is nil, userinfo@ is omitted. -// - if u.Host is empty, host/ is omitted. -// - if u.Scheme and u.Host are empty and u.User is nil, -// the entire scheme://userinfo@host/ is omitted. -// - if u.Host is non-empty and u.Path begins with a /, -// the form host/path does not add its own /. -// - if u.RawQuery is empty, ?query is omitted. -// - if u.Fragment is empty, #fragment is omitted. -func Escape(u *url.URL) string { - var buf bytes.Buffer - if u.Scheme != "" { - buf.WriteString(u.Scheme) - buf.WriteByte(':') - } - if u.Opaque != "" { - buf.WriteString(u.Opaque) - } else { - if u.Scheme != "" || u.Host != "" || u.User != nil { - buf.WriteString("//") - if ui := u.User; ui != nil { - buf.WriteString(unescapeUserinfo(ui.String())) - buf.WriteByte('@') - } - if h := u.Host; h != "" { - buf.WriteString(h) - } - } - if u.Path != "" && u.Path[0] != '/' && u.Host != "" { - buf.WriteByte('/') - } - buf.WriteString(escape(u.Path, encodePath)) - } - if u.RawQuery != "" { - buf.WriteByte('?') - buf.WriteString(u.RawQuery) - } - if u.Fragment != "" { - buf.WriteByte('#') - buf.WriteString(escape(u.Fragment, encodeFragment)) - } - return buf.String() -} diff --git a/vendor/github.com/emicklei/go-restful/Makefile b/vendor/github.com/emicklei/go-restful/Makefile deleted file mode 100644 index 3a824ac3d..000000000 --- a/vendor/github.com/emicklei/go-restful/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -all: test - -test: - go vet . - go test -cover -v . 
\ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/.gitignore b/vendor/github.com/emicklei/go-restful/v3/.gitignore similarity index 100% rename from vendor/github.com/emicklei/go-restful/.gitignore rename to vendor/github.com/emicklei/go-restful/v3/.gitignore diff --git a/vendor/github.com/emicklei/go-restful/v3/.goconvey b/vendor/github.com/emicklei/go-restful/v3/.goconvey new file mode 100644 index 000000000..8485e986e --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/.goconvey @@ -0,0 +1 @@ +ignore \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/.travis.yml b/vendor/github.com/emicklei/go-restful/v3/.travis.yml similarity index 100% rename from vendor/github.com/emicklei/go-restful/.travis.yml rename to vendor/github.com/emicklei/go-restful/v3/.travis.yml diff --git a/vendor/github.com/emicklei/go-restful/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md similarity index 72% rename from vendor/github.com/emicklei/go-restful/CHANGES.md rename to vendor/github.com/emicklei/go-restful/v3/CHANGES.md index f7409d546..74a378157 100644 --- a/vendor/github.com/emicklei/go-restful/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,100 +1,162 @@ -# Change history of go-restful (v2 only) +# Change history of go-restful -## v2.15.0 - 2020-11-10 +## [v3.9.0] - 20221-07-21 -- Add OPTIONS in Webservice +- add support for http.Handler implementations to work as FilterFunction, issue #504 (thanks to https://github.com/ggicci) + +## [v3.8.0] - 20221-06-06 + +- use exact matching of allowed domain entries, issue #489 (#493) + - this changes fixes [security] Authorization Bypass Through User-Controlled Key + by changing the behaviour of the AllowedDomains setting in the CORS filter. + To support the previous behaviour, the CORS filter type now has a AllowedDomainFunc + callback mechanism which is called when a simple domain match fails. +- add test and fix for POST without body and Content-type, issue #492 (#496) +- [Minor] Bad practice to have a mix of Receiver types. (#491) + +## [v3.7.2] - 2021-11-24 + +- restored FilterChain (#482 by SVilgelm) + + +## [v3.7.1] - 2021-10-04 + +- fix problem with contentEncodingEnabled setting (#479) + +## [v3.7.0] - 2021-09-24 + +- feat(parameter): adds additional openapi mappings (#478) + +## [v3.6.0] - 2021-09-18 + +- add support for vendor extensions (#477 thx erraggy) + +## [v3.5.2] - 2021-07-14 + +- fix removing absent route from webservice (#472) + +## [v3.5.1] - 2021-04-12 + +- fix handling no match access selected path +- remove obsolete field + +## [v3.5.0] - 2021-04-10 + +- add check for wildcard (#463) in CORS +- add access to Route from Request, issue #459 (#462) + +## [v3.4.0] - 2020-11-10 + +- Added OPTIONS to WebService + +## [v3.3.2] - 2020-01-23 -## v2.14.3 - 2020-08-31 - Fixed duplicate compression in dispatch. #449 -## v2.14.2 - 2020-08-31 +## [v3.3.1] - 2020-08-31 - Added check on writer to prevent compression of response twice. 
#447 -## v2.14.0 - 2020-08-19 +## [v3.3.0] - 2020-08-19 - Enable content encoding on Handle and ServeHTTP (#446) - List available representations in 406 body (#437) - Convert to string using rune() (#443) -## v2.13.0 - 2020-06-21 +## [v3.2.0] - 2020-06-21 -- 405 Method Not Allowed must have Allow header (#436) +- 405 Method Not Allowed must have Allow header (#436) (thx Bracken ) - add field allowedMethodsWithoutContentType (#424) -## v2.12.0 +## [v3.1.0] - support describing response headers (#426) - fix openapi examples (#425) -- merge v3 fix (#422) -## v2.11.1 +v3.0.0 + +- fix: use request/response resulting from filter chain +- add Go module + Module consumer should use github.com/emicklei/go-restful/v3 as import path + +v2.10.0 + +- support for Custom Verbs (thanks Vinci Xu <277040271@qq.com>) +- fixed static example (thanks Arthur ) +- simplify code (thanks Christian Muehlhaeuser ) +- added JWT HMAC with SHA-512 authentication code example (thanks Amim Knabben ) + +v2.9.6 + +- small optimization in filter code + +v2.11.1 - fix WriteError return value (#415) -## v2.11.0 +v2.11.0 - allow prefix and suffix in path variable expression (#414) -## v2.9.6 +v2.9.6 - support google custome verb (#413) -## v2.9.5 +v2.9.5 - fix panic in Response.WriteError if err == nil -## v2.9.4 +v2.9.4 - fix issue #400 , parsing mime type quality - Route Builder added option for contentEncodingEnabled (#398) -## v2.9.3 +v2.9.3 - Avoid return of 415 Unsupported Media Type when request body is empty (#396) -## v2.9.2 +v2.9.2 - Reduce allocations in per-request methods to improve performance (#395) -## v2.9.1 +v2.9.1 - Fix issue with default responses and invalid status code 0. (#393) -## v2.9.0 +v2.9.0 - add per Route content encoding setting (overrides container setting) -## v2.8.0 +v2.8.0 - add Request.QueryParameters() - add json-iterator (via build tag) - disable vgo module (until log is moved) -## v2.7.1 +v2.7.1 - add vgo module -## v2.6.1 +v2.6.1 - add JSONNewDecoderFunc to allow custom JSON Decoder usage (go 1.10+) -## v2.6.0 +v2.6.0 - Make JSR 311 routing and path param processing consistent - Adding description to RouteBuilder.Reads() - Update example for Swagger12 and OpenAPI -## 2017-09-13 +2017-09-13 - added route condition functions using `.If(func)` in route building. -## 2017-02-16 +2017-02-16 - solved issue #304, make operation names unique -## 2017-01-30 +2017-01-30 [IMPORTANT] For swagger users, change your import statement to: swagger "github.com/emicklei/go-restful-swagger12" @@ -102,60 +164,60 @@ - moved swagger 1.2 code to go-restful-swagger12 - created TAG 2.0.0 -## 2017-01-27 +2017-01-27 - remove defer request body close - expose Dispatch for testing filters and Routefunctions - swagger response model cannot be array - created TAG 1.0.0 -## 2016-12-22 +2016-12-22 - (API change) Remove code related to caching request content. Removes SetCacheReadEntity(doCache bool) -## 2016-11-26 +2016-11-26 - Default change! now use CurlyRouter (was RouterJSR311) - Default change! no more caching of request content - Default change! do not recover from panics -## 2016-09-22 +2016-09-22 - fix the DefaultRequestContentType feature -## 2016-02-14 +2016-02-14 - take the qualify factor of the Accept header mediatype into account when deciding the contentype of the response - add constructors for custom entity accessors for xml and json -## 2015-09-27 +2015-09-27 - rename new WriteStatusAnd... to WriteHeaderAnd... 
for consistency -## 2015-09-25 +2015-09-25 - fixed problem with changing Header after WriteHeader (issue 235) -## 2015-09-14 +2015-09-14 - changed behavior of WriteHeader (immediate write) and WriteEntity (no status write) - added support for custom EntityReaderWriters. -## 2015-08-06 +2015-08-06 - add support for reading entities from compressed request content - use sync.Pool for compressors of http response and request body - add Description to Parameter for documentation in Swagger UI -## 2015-03-20 +2015-03-20 - add configurable logging -## 2015-03-18 +2015-03-18 - if not specified, the Operation is derived from the Route function -## 2015-03-17 +2015-03-17 - expose Parameter creation functions - make trace logger an interface @@ -164,26 +226,26 @@ - JSR311 router now handles wildcards - add Notes to Route -## 2014-11-27 +2014-11-27 - (api add) PrettyPrint per response. (as proposed in #167) -## 2014-11-12 +2014-11-12 - (api add) ApiVersion(.) for documentation in Swagger UI -## 2014-11-10 +2014-11-10 - (api change) struct fields tagged with "description" show up in Swagger UI -## 2014-10-31 +2014-10-31 - (api change) ReturnsError -> Returns - (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder - fix swagger nested structs - sort Swagger response messages by code -## 2014-10-23 +2014-10-23 - (api add) ReturnsError allows you to document Http codes in swagger - fixed problem with greedy CurlyRouter @@ -197,73 +259,73 @@ - (api add) added AllowedDomains in CORS - (api add) ParameterNamed for detailed documentation -## 2014-04-16 +2014-04-16 - (api add) expose constructor of Request for testing. -## 2014-06-27 +2014-06-27 - (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification). - (api add) SetCacheReadEntity allow scontrol over whether or not the request body is being cached (default true for compatibility reasons). -## 2014-07-03 +2014-07-03 - (api add) CORS can be configured with a list of allowed domains -## 2014-03-12 +2014-03-12 - (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter) -## 2014-02-26 +2014-02-26 - (api add) Request now provides information about the matched Route, see method SelectedRoutePath -## 2014-02-17 +2014-02-17 - (api change) renamed parameter constants (go-lint checks) -## 2014-01-10 +2014-01-10 - (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier -## 2014-01-07 +2014-01-07 - (api change) Write* methods in Response now return the error or nil. - added example of serving HTML from a Go template. - fixed comparing Allowed headers in CORS (is now case-insensitive) -## 2013-11-13 +2013-11-13 - (api add) Response knows how many bytes are written to the response body. -## 2013-10-29 +2013-10-29 - (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information. 
-## 2013-10-04 +2013-10-04 - (api add) Response knows what HTTP status has been written - (api add) Request can have attributes (map of string->interface, also called request-scoped variables -## 2013-09-12 +2013-09-12 - (api change) Router interface simplified - Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths -## 2013-08-05 +2013-08-05 - add OPTIONS support - add CORS support -## 2013-08-27 +2013-08-27 - fixed some reported issues (see github) - (api change) deprecated use of WriteError; use WriteErrorString instead -## 2014-04-15 +2014-04-15 - (fix) v1.0.1 tag: fix Issue 111: WriteErrorString -## 2013-08-08 +2013-08-08 - (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer. - (api add) the swagger package has be extended to have a UI per container. @@ -276,38 +338,38 @@ Important API changes: - (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead. -## 2013-07-06 +2013-07-06 - (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature. -## 2013-06-19 +2013-06-19 - (improve) DoNotRecover option, moved request body closer, improved ReadEntity -## 2013-06-03 +2013-06-03 - (api change) removed Dispatcher interface, hide PathExpression - changed receiver names of type functions to be more idiomatic Go -## 2013-06-02 +2013-06-02 - (optimize) Cache the RegExp compilation of Paths. -## 2013-05-22 +2013-05-22 - (api add) Added support for request/response filter functions -## 2013-05-18 +2013-05-18 - (api add) Added feature to change the default Http Request Dispatch function (travis cline) - (api change) Moved Swagger Webservice to swagger package (see example restful-user) -## [2012-11-14 .. 2013-05-18> +[2012-11-14 .. 2013-05-18> - See https://github.com/emicklei/go-restful/commits -## 2012-11-14 +2012-11-14 - Initial commit diff --git a/vendor/github.com/emicklei/go-restful/LICENSE b/vendor/github.com/emicklei/go-restful/v3/LICENSE similarity index 100% rename from vendor/github.com/emicklei/go-restful/LICENSE rename to vendor/github.com/emicklei/go-restful/v3/LICENSE diff --git a/vendor/github.com/emicklei/go-restful/v3/Makefile b/vendor/github.com/emicklei/go-restful/v3/Makefile new file mode 100644 index 000000000..16d0b80bb --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/Makefile @@ -0,0 +1,8 @@ +all: test + +test: + go vet . + go test -cover -v . 
+ +ex: + find ./examples -type f -name "*.go" | xargs -I {} go build -o /tmp/ignore {} \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md similarity index 89% rename from vendor/github.com/emicklei/go-restful/README.md rename to vendor/github.com/emicklei/go-restful/v3/README.md index e5878a668..0625359dc 100644 --- a/vendor/github.com/emicklei/go-restful/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -7,7 +7,7 @@ package for building REST-style Web Services using Google Go [![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful) [![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful) -- [Code examples using v3](https://github.com/emicklei/go-restful/tree/master/examples) +- [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples) REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping: @@ -21,23 +21,23 @@ REST asks developers to use HTTP methods explicitly and in a way that's consiste ### Usage -#### Using Go Modules +#### Without Go Modules -As of version `v3.0.0` (on the v3 branch), this package supports Go modules. +All versions up to `v2.*.*` (on the master) are not supporting Go modules. ``` import ( - restful "github.com/emicklei/go-restful/v3" + restful "github.com/emicklei/go-restful" ) ``` -#### Without Go Modules +#### Using Go Modules -All versions up to `v2.*.*` (on the master) are not supporting Go modules. +As of version `v3.0.0` (on the v3 branch), this package supports Go modules. ``` import ( - restful "github.com/emicklei/go-restful" + restful "github.com/emicklei/go-restful/v3" ) ``` @@ -61,16 +61,16 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo ... } ``` - -[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/user-resource/restful-user-resource.go) - + +[Full API of a UserResource](https://github.com/emicklei/go-restful/blob/v3/examples/user-resource/restful-user-resource.go) + ### Features - Routes for request → function mapping with path parameter (e.g. {id} but also prefix_{var} and {var}_suffix) support - Configurable router: - (default) Fast routing algorithm that allows static elements, [google custom method](https://cloud.google.com/apis/design/custom_methods), regular expressions and dynamic parameters in the URL path (e.g. 
/resource/name:customVerb, /meetings/{id} or /static/{subpath:*}) - Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions -- Request API for reading structs from JSON/XML and accesing parameters (path,query,header) +- Request API for reading structs from JSON/XML and accessing parameters (path,query,header) - Response API for writing structs to JSON/XML and setting headers - Customizable encoding using EntityReaderWriter registration - Filters for intercepting the request → response flow on Service or Route level @@ -84,6 +84,7 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo - Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...) - Configurable (trace) logging - Customizable gzip/deflate readers and writers using CompressorProvider registration +- Inject your own http.Handler using the `HttpMiddlewareHandlerToFilter` function ## How to customize There are several hooks to customize the behavior of the go-restful package. @@ -94,12 +95,11 @@ There are several hooks to customize the behavior of the go-restful package. - Trace logging - Compression - Encoders for other serializers -- Use [jsoniter](https://github.com/json-iterator/go) by build this package using a tag, e.g. `go build -tags=jsoniter .` - -TODO: write examples of these. +- Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .` ## Resources +- [Example programs](./examples) - [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/) - [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/) - [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful) @@ -108,4 +108,4 @@ TODO: write examples of these. Type ```git shortlog -s``` for a full list of contributors. -© 2012 - 2020, http://ernestmicklei.com. MIT License. Contributions are welcome. +© 2012 - 2022, http://ernestmicklei.com. MIT License. Contributions are welcome. diff --git a/vendor/github.com/emicklei/go-restful/v3/SECURITY.md b/vendor/github.com/emicklei/go-restful/v3/SECURITY.md new file mode 100644 index 000000000..810d3b510 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/SECURITY.md @@ -0,0 +1,13 @@ +# Security Policy + +## Supported Versions + +| Version | Supported | +| ------- | ------------------ | +| v3.7.x | :white_check_mark: | +| < v3.0.1 | :x: | + +## Reporting a Vulnerability + +Create an Issue and put the label `[security]` in the title of the issue. +Valid reported security issues are expected to be solved within a week. 
diff --git a/vendor/github.com/emicklei/go-restful/Srcfile b/vendor/github.com/emicklei/go-restful/v3/Srcfile similarity index 100% rename from vendor/github.com/emicklei/go-restful/Srcfile rename to vendor/github.com/emicklei/go-restful/v3/Srcfile diff --git a/vendor/github.com/emicklei/go-restful/bench_test.sh b/vendor/github.com/emicklei/go-restful/v3/bench_test.sh similarity index 100% rename from vendor/github.com/emicklei/go-restful/bench_test.sh rename to vendor/github.com/emicklei/go-restful/v3/bench_test.sh diff --git a/vendor/github.com/emicklei/go-restful/compress.go b/vendor/github.com/emicklei/go-restful/v3/compress.go similarity index 92% rename from vendor/github.com/emicklei/go-restful/compress.go rename to vendor/github.com/emicklei/go-restful/v3/compress.go index 220b37712..1ff239f99 100644 --- a/vendor/github.com/emicklei/go-restful/compress.go +++ b/vendor/github.com/emicklei/go-restful/v3/compress.go @@ -83,7 +83,11 @@ func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error } // WantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested. -func wantsCompressedResponse(httpRequest *http.Request) (bool, string) { +// It also inspects the httpWriter whether its content-encoding is already set (non-empty). +func wantsCompressedResponse(httpRequest *http.Request, httpWriter http.ResponseWriter) (bool, string) { + if contentEncoding := httpWriter.Header().Get(HEADER_ContentEncoding); contentEncoding != "" { + return false, "" + } header := httpRequest.Header.Get(HEADER_AcceptEncoding) gi := strings.Index(header, ENCODING_GZIP) zi := strings.Index(header, ENCODING_DEFLATE) diff --git a/vendor/github.com/emicklei/go-restful/compressor_cache.go b/vendor/github.com/emicklei/go-restful/v3/compressor_cache.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/compressor_cache.go rename to vendor/github.com/emicklei/go-restful/v3/compressor_cache.go diff --git a/vendor/github.com/emicklei/go-restful/compressor_pools.go b/vendor/github.com/emicklei/go-restful/v3/compressor_pools.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/compressor_pools.go rename to vendor/github.com/emicklei/go-restful/v3/compressor_pools.go diff --git a/vendor/github.com/emicklei/go-restful/compressors.go b/vendor/github.com/emicklei/go-restful/v3/compressors.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/compressors.go rename to vendor/github.com/emicklei/go-restful/v3/compressors.go diff --git a/vendor/github.com/emicklei/go-restful/constants.go b/vendor/github.com/emicklei/go-restful/v3/constants.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/constants.go rename to vendor/github.com/emicklei/go-restful/v3/constants.go diff --git a/vendor/github.com/emicklei/go-restful/container.go b/vendor/github.com/emicklei/go-restful/v3/container.go similarity index 97% rename from vendor/github.com/emicklei/go-restful/container.go rename to vendor/github.com/emicklei/go-restful/v3/container.go index afca312a4..dd56246dd 100644 --- a/vendor/github.com/emicklei/go-restful/container.go +++ b/vendor/github.com/emicklei/go-restful/v3/container.go @@ -14,7 +14,7 @@ import ( "strings" "sync" - "github.com/emicklei/go-restful/log" + "github.com/emicklei/go-restful/v3/log" ) // Container holds a collection of WebServices and a http.ServeMux to dispatch http requests. 
@@ -261,7 +261,7 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R contentEncodingEnabled = *route.contentEncodingEnabled } if contentEncodingEnabled { - doCompress, encoding := wantsCompressedResponse(httpRequest) + doCompress, encoding := wantsCompressedResponse(httpRequest, httpWriter) if doCompress { var err error writer, err = NewCompressingResponseWriter(httpWriter, encoding) @@ -287,7 +287,12 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R allFilters = append(allFilters, c.containerFilters...) allFilters = append(allFilters, webService.filters...) allFilters = append(allFilters, route.Filters...) - chain := FilterChain{Filters: allFilters, Target: route.Function} + chain := FilterChain{ + Filters: allFilters, + Target: route.Function, + ParameterDocs: route.ParameterDocs, + Operation: route.Operation, + } chain.ProcessFilter(wrappedRequest, wrappedResponse) } else { // no filters, handle request by route @@ -327,7 +332,7 @@ func (c *Container) ServeHTTP(httpWriter http.ResponseWriter, httpRequest *http. } }() - doCompress, encoding := wantsCompressedResponse(httpRequest) + doCompress, encoding := wantsCompressedResponse(httpRequest, httpWriter) if doCompress { var err error writer, err = NewCompressingResponseWriter(httpWriter, encoding) @@ -360,7 +365,7 @@ func (c *Container) Handle(pattern string, handler http.Handler) { }() if c.contentEncodingEnabled { - doCompress, encoding := wantsCompressedResponse(httpRequest) + doCompress, encoding := wantsCompressedResponse(httpRequest, httpWriter) if doCompress { var err error writer, err = NewCompressingResponseWriter(httpWriter, encoding) diff --git a/vendor/github.com/emicklei/go-restful/cors_filter.go b/vendor/github.com/emicklei/go-restful/v3/cors_filter.go similarity index 81% rename from vendor/github.com/emicklei/go-restful/cors_filter.go rename to vendor/github.com/emicklei/go-restful/v3/cors_filter.go index 1efeef072..9d18dfb7b 100644 --- a/vendor/github.com/emicklei/go-restful/cors_filter.go +++ b/vendor/github.com/emicklei/go-restful/v3/cors_filter.go @@ -18,9 +18,22 @@ import ( // http://enable-cors.org/server.html // http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request type CrossOriginResourceSharing struct { - ExposeHeaders []string // list of Header names - AllowedHeaders []string // list of Header names - AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed. + ExposeHeaders []string // list of Header names + + // AllowedHeaders is alist of Header names. Checking is case-insensitive. + // The list may contain the special wildcard string ".*" ; all is allowed + AllowedHeaders []string + + // AllowedDomains is a list of allowed values for Http Origin. + // The list may contain the special wildcard string ".*" ; all is allowed + // If empty all are allowed. + AllowedDomains []string + + // AllowedDomainFunc is optional and is a function that will do the check + // when the origin is not part of the AllowedDomains and it does not contain the wildcard ".*". + AllowedDomainFunc func(origin string) bool + + // AllowedMethods is either empty or has a list of http methods names. Checking is case-insensitive. 
AllowedMethods []string MaxAge int // number of seconds before requiring new Options request CookiesAllowed bool @@ -119,36 +132,24 @@ func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool { if len(origin) == 0 { return false } + lowerOrigin := strings.ToLower(origin) if len(c.AllowedDomains) == 0 { + if c.AllowedDomainFunc != nil { + return c.AllowedDomainFunc(lowerOrigin) + } return true } - allowed := false + // exact match on each allowed domain for _, domain := range c.AllowedDomains { - if domain == origin { - allowed = true - break + if domain == ".*" || strings.ToLower(domain) == lowerOrigin { + return true } } - - if !allowed { - if len(c.allowedOriginPatterns) == 0 { - // compile allowed domains to allowed origin patterns - allowedOriginRegexps, err := compileRegexps(c.AllowedDomains) - if err != nil { - return false - } - c.allowedOriginPatterns = allowedOriginRegexps - } - - for _, pattern := range c.allowedOriginPatterns { - if allowed = pattern.MatchString(origin); allowed { - break - } - } + if c.AllowedDomainFunc != nil { + return c.AllowedDomainFunc(origin) } - - return allowed + return false } func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) { @@ -184,19 +185,9 @@ func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header str if strings.ToLower(each) == strings.ToLower(header) { return true } - } - return false -} - -// Take a list of strings and compile them into a list of regular expressions. -func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) { - regexps := []*regexp.Regexp{} - for _, regexpStr := range regexpStrings { - r, err := regexp.Compile(regexpStr) - if err != nil { - return regexps, err + if each == "*" { + return true } - regexps = append(regexps, r) } - return regexps, nil + return false } diff --git a/vendor/github.com/emicklei/go-restful/coverage.sh b/vendor/github.com/emicklei/go-restful/v3/coverage.sh similarity index 100% rename from vendor/github.com/emicklei/go-restful/coverage.sh rename to vendor/github.com/emicklei/go-restful/v3/coverage.sh diff --git a/vendor/github.com/emicklei/go-restful/curly.go b/vendor/github.com/emicklei/go-restful/v3/curly.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/curly.go rename to vendor/github.com/emicklei/go-restful/v3/curly.go diff --git a/vendor/github.com/emicklei/go-restful/curly_route.go b/vendor/github.com/emicklei/go-restful/v3/curly_route.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/curly_route.go rename to vendor/github.com/emicklei/go-restful/v3/curly_route.go diff --git a/vendor/github.com/emicklei/go-restful/custom_verb.go b/vendor/github.com/emicklei/go-restful/v3/custom_verb.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/custom_verb.go rename to vendor/github.com/emicklei/go-restful/v3/custom_verb.go diff --git a/vendor/github.com/emicklei/go-restful/doc.go b/vendor/github.com/emicklei/go-restful/v3/doc.go similarity index 95% rename from vendor/github.com/emicklei/go-restful/doc.go rename to vendor/github.com/emicklei/go-restful/v3/doc.go index f7c16b01f..69b13057d 100644 --- a/vendor/github.com/emicklei/go-restful/doc.go +++ b/vendor/github.com/emicklei/go-restful/v3/doc.go @@ -28,7 +28,7 @@ This package has the logic to find the best matching Route and if found, call it The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response. 
-See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation. +See the example https://github.com/emicklei/go-restful/blob/v3/examples/user-resource/restful-user-resource.go with a full implementation. Regular expression matching Routes @@ -82,7 +82,7 @@ These are processed before calling the function associated with the Route. // install 2 chained route filters (processed before calling findUser) ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser)) -See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations. +See the example https://github.com/emicklei/go-restful/blob/v3/examples/filters/restful-filters.go with full implementations. Response Encoding @@ -93,7 +93,7 @@ Two encodings are supported: gzip and deflate. To enable this for all responses: If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding. Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route. -See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go +See the example https://github.com/emicklei/go-restful/blob/v3/examples/encoding/restful-encoding-filter.go OPTIONS support diff --git a/vendor/github.com/emicklei/go-restful/entity_accessors.go b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/entity_accessors.go rename to vendor/github.com/emicklei/go-restful/v3/entity_accessors.go diff --git a/vendor/github.com/emicklei/go-restful/v3/extensions.go b/vendor/github.com/emicklei/go-restful/v3/extensions.go new file mode 100644 index 000000000..5023fa049 --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/extensions.go @@ -0,0 +1,21 @@ +package restful + +// Copyright 2021 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +// ExtensionProperties provides storage of vendor extensions for entities +type ExtensionProperties struct { + // Extensions vendor extensions used to describe extra functionality + // (https://swagger.io/docs/specification/2-0/swagger-extensions/) + Extensions map[string]interface{} +} + +// AddExtension adds or updates a key=value pair to the extension map. +func (ep *ExtensionProperties) AddExtension(key string, value interface{}) { + if ep.Extensions == nil { + ep.Extensions = map[string]interface{}{key: value} + } else { + ep.Extensions[key] = value + } +} diff --git a/vendor/github.com/emicklei/go-restful/filter.go b/vendor/github.com/emicklei/go-restful/v3/filter.go similarity index 79% rename from vendor/github.com/emicklei/go-restful/filter.go rename to vendor/github.com/emicklei/go-restful/v3/filter.go index c23bfb591..fd88c536c 100644 --- a/vendor/github.com/emicklei/go-restful/filter.go +++ b/vendor/github.com/emicklei/go-restful/v3/filter.go @@ -6,9 +6,11 @@ package restful // FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction. 
type FilterChain struct { - Filters []FilterFunction // ordered list of FilterFunction - Index int // index into filters that is currently in progress - Target RouteFunction // function to call after passing all filters + Filters []FilterFunction // ordered list of FilterFunction + Index int // index into filters that is currently in progress + Target RouteFunction // function to call after passing all filters + ParameterDocs []*Parameter // the parameter docs for the route + Operation string // the name of the operation } // ProcessFilter passes the request,response pair through the next of Filters. diff --git a/vendor/github.com/emicklei/go-restful/v3/filter_adapter.go b/vendor/github.com/emicklei/go-restful/v3/filter_adapter.go new file mode 100644 index 000000000..c246512fc --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/filter_adapter.go @@ -0,0 +1,21 @@ +package restful + +import ( + "net/http" +) + +// HttpMiddlewareHandler is a function that takes a http.Handler and returns a http.Handler +type HttpMiddlewareHandler func(http.Handler) http.Handler + +// HttpMiddlewareHandlerToFilter converts a HttpMiddlewareHandler to a FilterFunction. +func HttpMiddlewareHandlerToFilter(middleware HttpMiddlewareHandler) FilterFunction { + return func(req *Request, resp *Response, chain *FilterChain) { + next := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + req.Request = r + resp.ResponseWriter = rw + chain.ProcessFilter(req, resp) + }) + + middleware(next).ServeHTTP(resp.ResponseWriter, req.Request) + } +} diff --git a/vendor/github.com/emicklei/go-restful/json.go b/vendor/github.com/emicklei/go-restful/v3/json.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/json.go rename to vendor/github.com/emicklei/go-restful/v3/json.go diff --git a/vendor/github.com/emicklei/go-restful/jsoniter.go b/vendor/github.com/emicklei/go-restful/v3/jsoniter.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/jsoniter.go rename to vendor/github.com/emicklei/go-restful/v3/jsoniter.go diff --git a/vendor/github.com/emicklei/go-restful/jsr311.go b/vendor/github.com/emicklei/go-restful/v3/jsr311.go similarity index 96% rename from vendor/github.com/emicklei/go-restful/jsr311.go rename to vendor/github.com/emicklei/go-restful/v3/jsr311.go index 9cfd59a1c..07a0c91e9 100644 --- a/vendor/github.com/emicklei/go-restful/jsr311.go +++ b/vendor/github.com/emicklei/go-restful/v3/jsr311.go @@ -151,6 +151,16 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R for _, candidate := range previous { available = append(available, candidate.Produces...) 
} + // if POST,PUT,PATCH without body + method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length") + if (method == http.MethodPost || + method == http.MethodPut || + method == http.MethodPatch) && length == "" { + return nil, NewError( + http.StatusUnsupportedMediaType, + fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")), + ) + } return nil, NewError( http.StatusNotAcceptable, fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")), diff --git a/vendor/github.com/emicklei/go-restful/log/log.go b/vendor/github.com/emicklei/go-restful/v3/log/log.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/log/log.go rename to vendor/github.com/emicklei/go-restful/v3/log/log.go diff --git a/vendor/github.com/emicklei/go-restful/logger.go b/vendor/github.com/emicklei/go-restful/v3/logger.go similarity index 95% rename from vendor/github.com/emicklei/go-restful/logger.go rename to vendor/github.com/emicklei/go-restful/v3/logger.go index 6595df002..29202726f 100644 --- a/vendor/github.com/emicklei/go-restful/logger.go +++ b/vendor/github.com/emicklei/go-restful/v3/logger.go @@ -4,7 +4,7 @@ package restful // Use of this source code is governed by a license // that can be found in the LICENSE file. import ( - "github.com/emicklei/go-restful/log" + "github.com/emicklei/go-restful/v3/log" ) var trace bool = false diff --git a/vendor/github.com/emicklei/go-restful/mime.go b/vendor/github.com/emicklei/go-restful/v3/mime.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/mime.go rename to vendor/github.com/emicklei/go-restful/v3/mime.go diff --git a/vendor/github.com/emicklei/go-restful/options_filter.go b/vendor/github.com/emicklei/go-restful/v3/options_filter.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/options_filter.go rename to vendor/github.com/emicklei/go-restful/v3/options_filter.go diff --git a/vendor/github.com/emicklei/go-restful/parameter.go b/vendor/github.com/emicklei/go-restful/v3/parameter.go similarity index 56% rename from vendor/github.com/emicklei/go-restful/parameter.go rename to vendor/github.com/emicklei/go-restful/v3/parameter.go index e8793304b..0b851bb43 100644 --- a/vendor/github.com/emicklei/go-restful/parameter.go +++ b/vendor/github.com/emicklei/go-restful/v3/parameter.go @@ -1,5 +1,7 @@ package restful +import "sort" + // Copyright 2013 Ernest Micklei. All rights reserved. // Use of this source code is governed by a license // that can be found in the LICENSE file. @@ -20,6 +22,9 @@ const ( // FormParameterKind = indicator of Request parameter type "form" FormParameterKind + // MultiPartFormParameterKind = indicator of Request parameter type "multipart/form-data" + MultiPartFormParameterKind + // CollectionFormatCSV comma separated values `foo,bar` CollectionFormatCSV = CollectionFormat("csv") @@ -52,13 +57,25 @@ type Parameter struct { // ParameterData represents the state of a Parameter. // It is made public to make it accessible to e.g. the Swagger package. type ParameterData struct { + ExtensionProperties Name, Description, DataType, DataFormat string Kind int Required bool - AllowableValues map[string]string - AllowMultiple bool - DefaultValue string - CollectionFormat string + // AllowableValues is deprecated. 
Use PossibleValues instead + AllowableValues map[string]string + PossibleValues []string + AllowMultiple bool + AllowEmptyValue bool + DefaultValue string + CollectionFormat string + Pattern string + Minimum *float64 + Maximum *float64 + MinLength *int64 + MaxLength *int64 + MinItems *int64 + MaxItems *int64 + UniqueItems bool } // Data returns the state of the Parameter @@ -94,6 +111,11 @@ func (p *Parameter) beForm() *Parameter { return p } +func (p *Parameter) beMultiPartForm() *Parameter { + p.data.Kind = MultiPartFormParameterKind + return p +} + // Required sets the required field and returns the receiver func (p *Parameter) Required(required bool) *Parameter { p.data.Required = required @@ -106,9 +128,38 @@ func (p *Parameter) AllowMultiple(multiple bool) *Parameter { return p } -// AllowableValues sets the allowableValues field and returns the receiver +// AddExtension adds or updates a key=value pair to the extension map +func (p *Parameter) AddExtension(key string, value interface{}) *Parameter { + p.data.AddExtension(key, value) + return p +} + +// AllowEmptyValue sets the AllowEmptyValue field and returns the receiver +func (p *Parameter) AllowEmptyValue(multiple bool) *Parameter { + p.data.AllowEmptyValue = multiple + return p +} + +// AllowableValues is deprecated. Use PossibleValues instead. Both will be set. func (p *Parameter) AllowableValues(values map[string]string) *Parameter { p.data.AllowableValues = values + + allowableSortedKeys := make([]string, 0, len(values)) + for k := range values { + allowableSortedKeys = append(allowableSortedKeys, k) + } + sort.Strings(allowableSortedKeys) + + p.data.PossibleValues = make([]string, 0, len(values)) + for _, k := range allowableSortedKeys { + p.data.PossibleValues = append(p.data.PossibleValues, values[k]) + } + return p +} + +// PossibleValues sets the possible values field and returns the receiver +func (p *Parameter) PossibleValues(values []string) *Parameter { + p.data.PossibleValues = values return p } @@ -141,3 +192,51 @@ func (p *Parameter) CollectionFormat(format CollectionFormat) *Parameter { p.data.CollectionFormat = format.String() return p } + +// Pattern sets the pattern field and returns the receiver +func (p *Parameter) Pattern(pattern string) *Parameter { + p.data.Pattern = pattern + return p +} + +// Minimum sets the minimum field and returns the receiver +func (p *Parameter) Minimum(minimum float64) *Parameter { + p.data.Minimum = &minimum + return p +} + +// Maximum sets the maximum field and returns the receiver +func (p *Parameter) Maximum(maximum float64) *Parameter { + p.data.Maximum = &maximum + return p +} + +// MinLength sets the minLength field and returns the receiver +func (p *Parameter) MinLength(minLength int64) *Parameter { + p.data.MinLength = &minLength + return p +} + +// MaxLength sets the maxLength field and returns the receiver +func (p *Parameter) MaxLength(maxLength int64) *Parameter { + p.data.MaxLength = &maxLength + return p +} + +// MinItems sets the minItems field and returns the receiver +func (p *Parameter) MinItems(minItems int64) *Parameter { + p.data.MinItems = &minItems + return p +} + +// MaxItems sets the maxItems field and returns the receiver +func (p *Parameter) MaxItems(maxItems int64) *Parameter { + p.data.MaxItems = &maxItems + return p +} + +// UniqueItems sets the uniqueItems field and returns the receiver +func (p *Parameter) UniqueItems(uniqueItems bool) *Parameter { + p.data.UniqueItems = uniqueItems + return p +} diff --git 
a/vendor/github.com/emicklei/go-restful/path_expression.go b/vendor/github.com/emicklei/go-restful/v3/path_expression.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/path_expression.go rename to vendor/github.com/emicklei/go-restful/v3/path_expression.go diff --git a/vendor/github.com/emicklei/go-restful/path_processor.go b/vendor/github.com/emicklei/go-restful/v3/path_processor.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/path_processor.go rename to vendor/github.com/emicklei/go-restful/v3/path_processor.go diff --git a/vendor/github.com/emicklei/go-restful/request.go b/vendor/github.com/emicklei/go-restful/v3/request.go similarity index 85% rename from vendor/github.com/emicklei/go-restful/request.go rename to vendor/github.com/emicklei/go-restful/v3/request.go index a20730feb..5725a0759 100644 --- a/vendor/github.com/emicklei/go-restful/request.go +++ b/vendor/github.com/emicklei/go-restful/v3/request.go @@ -13,10 +13,10 @@ var defaultRequestContentType string // Request is a wrapper for a http Request that provides convenience methods type Request struct { - Request *http.Request - pathParameters map[string]string - attributes map[string]interface{} // for storing request-scoped values - selectedRoutePath string // root path + route path that matched the request, e.g. /meetings/{id}/attendees + Request *http.Request + pathParameters map[string]string + attributes map[string]interface{} // for storing request-scoped values + selectedRoute *Route // is nil when no route was matched } func NewRequest(httpRequest *http.Request) *Request { @@ -113,6 +113,20 @@ func (r Request) Attribute(name string) interface{} { } // SelectedRoutePath root path + route path that matched the request, e.g. /meetings/{id}/attendees +// If no route was matched then return an empty string. func (r Request) SelectedRoutePath() string { - return r.selectedRoutePath + if r.selectedRoute == nil { + return "" + } + // skip creating an accessor + return r.selectedRoute.Path +} + +// SelectedRoute returns a reader to access the selected Route by the container +// Returns nil if no route was matched. +func (r Request) SelectedRoute() RouteReader { + if r.selectedRoute == nil { + return nil + } + return routeAccessor{route: r.selectedRoute} } diff --git a/vendor/github.com/emicklei/go-restful/response.go b/vendor/github.com/emicklei/go-restful/v3/response.go similarity index 99% rename from vendor/github.com/emicklei/go-restful/response.go rename to vendor/github.com/emicklei/go-restful/v3/response.go index e2f78f00f..8f0b56aa2 100644 --- a/vendor/github.com/emicklei/go-restful/response.go +++ b/vendor/github.com/emicklei/go-restful/v3/response.go @@ -175,7 +175,7 @@ func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType } // WriteError writes the http status and the error string on the response. err can be nil. -// Return an error if writing was not succesful. +// Return an error if writing was not successful. 
func (r *Response) WriteError(httpStatus int, err error) (writeErr error) { r.err = err if err == nil { diff --git a/vendor/github.com/emicklei/go-restful/route.go b/vendor/github.com/emicklei/go-restful/v3/route.go similarity index 97% rename from vendor/github.com/emicklei/go-restful/route.go rename to vendor/github.com/emicklei/go-restful/v3/route.go index 598aa57a7..193f4a6b0 100644 --- a/vendor/github.com/emicklei/go-restful/route.go +++ b/vendor/github.com/emicklei/go-restful/v3/route.go @@ -19,6 +19,7 @@ type RouteSelectionConditionFunction func(httpRequest *http.Request) bool // Route binds a HTTP Method,Path,Consumes combination to a RouteFunction. type Route struct { + ExtensionProperties Method string Produces []string Consumes []string @@ -69,7 +70,7 @@ func (r *Route) postBuild() { func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request, pathParams map[string]string) (*Request, *Response) { wrappedRequest := NewRequest(httpRequest) wrappedRequest.pathParameters = pathParams - wrappedRequest.selectedRoutePath = r.Path + wrappedRequest.selectedRoute = r wrappedResponse := NewResponse(httpWriter) wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept) wrappedResponse.routeProduces = r.Produces @@ -167,11 +168,11 @@ func tokenizePath(path string) []string { } // for debugging -func (r Route) String() string { +func (r *Route) String() string { return r.Method + " " + r.Path } // EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses. Overrides the container.contentEncodingEnabled value. -func (r Route) EnableContentEncoding(enabled bool) { +func (r *Route) EnableContentEncoding(enabled bool) { r.contentEncodingEnabled = &enabled } diff --git a/vendor/github.com/emicklei/go-restful/route_builder.go b/vendor/github.com/emicklei/go-restful/v3/route_builder.go similarity index 96% rename from vendor/github.com/emicklei/go-restful/route_builder.go rename to vendor/github.com/emicklei/go-restful/v3/route_builder.go index 1d67a4c23..23641b6dd 100644 --- a/vendor/github.com/emicklei/go-restful/route_builder.go +++ b/vendor/github.com/emicklei/go-restful/v3/route_builder.go @@ -12,7 +12,7 @@ import ( "strings" "sync/atomic" - "github.com/emicklei/go-restful/log" + "github.com/emicklei/go-restful/v3/log" ) // RouteBuilder is a helper to construct Routes. @@ -38,6 +38,7 @@ type RouteBuilder struct { errorMap map[int]ResponseError defaultResponse *ResponseError metadata map[string]interface{} + extensions map[string]interface{} deprecated bool contentEncodingEnabled *bool } @@ -204,13 +205,22 @@ func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder { return b } +// AddExtension adds or updates a key=value pair to the extensions map. +func (b *RouteBuilder) AddExtension(key string, value interface{}) *RouteBuilder { + if b.extensions == nil { + b.extensions = map[string]interface{}{} + } + b.extensions[key] = value + return b +} + // Deprecate sets the value of deprecated to true. Deprecated routes have a special UI treatment to warn against use func (b *RouteBuilder) Deprecate() *RouteBuilder { b.deprecated = true return b } -// AllowedMethodsWithoutContentType overides the default list GET,HEAD,OPTIONS,DELETE,TRACE +// AllowedMethodsWithoutContentType overrides the default list GET,HEAD,OPTIONS,DELETE,TRACE // If a request does not include a content-type header then // depending on the method, it may return a 415 Unsupported Media. 
// Must have uppercase HTTP Method names such as GET,HEAD,OPTIONS,... @@ -221,6 +231,7 @@ func (b *RouteBuilder) AllowedMethodsWithoutContentType(methods []string) *Route // ResponseError represents a response; not necessarily an error. type ResponseError struct { + ExtensionProperties Code int Message string Model interface{} @@ -335,6 +346,7 @@ func (b *RouteBuilder) Build() Route { contentEncodingEnabled: b.contentEncodingEnabled, allowedMethodsWithoutContentType: b.allowedMethodsWithoutContentType, } + route.Extensions = b.extensions route.postBuild() return route } diff --git a/vendor/github.com/emicklei/go-restful/v3/route_reader.go b/vendor/github.com/emicklei/go-restful/v3/route_reader.go new file mode 100644 index 000000000..c9f4ee75f --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/v3/route_reader.go @@ -0,0 +1,66 @@ +package restful + +// Copyright 2021 Ernest Micklei. All rights reserved. +// Use of this source code is governed by a license +// that can be found in the LICENSE file. + +type RouteReader interface { + Method() string + Consumes() []string + Path() string + Doc() string + Notes() string + Operation() string + ParameterDocs() []*Parameter + // Returns a copy + Metadata() map[string]interface{} + Deprecated() bool +} + +type routeAccessor struct { + route *Route +} + +func (r routeAccessor) Method() string { + return r.route.Method +} +func (r routeAccessor) Consumes() []string { + return r.route.Consumes[:] +} +func (r routeAccessor) Path() string { + return r.route.Path +} +func (r routeAccessor) Doc() string { + return r.route.Doc +} +func (r routeAccessor) Notes() string { + return r.route.Notes +} +func (r routeAccessor) Operation() string { + return r.route.Operation +} +func (r routeAccessor) ParameterDocs() []*Parameter { + return r.route.ParameterDocs[:] +} + +// Returns a copy +func (r routeAccessor) Metadata() map[string]interface{} { + return copyMap(r.route.Metadata) +} +func (r routeAccessor) Deprecated() bool { + return r.route.Deprecated +} + +// https://stackoverflow.com/questions/23057785/how-to-copy-a-map +func copyMap(m map[string]interface{}) map[string]interface{} { + cp := make(map[string]interface{}) + for k, v := range m { + vm, ok := v.(map[string]interface{}) + if ok { + cp[k] = copyMap(vm) + } else { + cp[k] = v + } + } + return cp +} diff --git a/vendor/github.com/emicklei/go-restful/router.go b/vendor/github.com/emicklei/go-restful/v3/router.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/router.go rename to vendor/github.com/emicklei/go-restful/v3/router.go diff --git a/vendor/github.com/emicklei/go-restful/service_error.go b/vendor/github.com/emicklei/go-restful/v3/service_error.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/service_error.go rename to vendor/github.com/emicklei/go-restful/v3/service_error.go diff --git a/vendor/github.com/emicklei/go-restful/web_service.go b/vendor/github.com/emicklei/go-restful/v3/web_service.go similarity index 91% rename from vendor/github.com/emicklei/go-restful/web_service.go rename to vendor/github.com/emicklei/go-restful/v3/web_service.go index 2c164a2a2..789c4df25 100644 --- a/vendor/github.com/emicklei/go-restful/web_service.go +++ b/vendor/github.com/emicklei/go-restful/v3/web_service.go @@ -6,7 +6,7 @@ import ( "reflect" "sync" - "github.com/emicklei/go-restful/log" + "github.com/emicklei/go-restful/v3/log" ) // Copyright 2013 Ernest Micklei. All rights reserved. 
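Request now carries the matched *Route instead of just its path string, and the RouteReader introduced above gives filters read-only access to it. A minimal sketch of a filter using the accessor (the auditFilter name is illustrative, not part of this patch):

package main

import (
	"log"

	restful "github.com/emicklei/go-restful/v3"
)

// auditFilter logs which route is about to handle the request.
// SelectedRoute returns nil when no route was matched.
func auditFilter(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
	if route := req.SelectedRoute(); route != nil {
		log.Printf("%s %s operation=%s", route.Method(), route.Path(), route.Operation())
	}
	chain.ProcessFilter(req, resp)
}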
@@ -165,6 +165,18 @@ func FormParameter(name, description string) *Parameter { return p } +// MultiPartFormParameter creates a new Parameter of kind Form (using multipart/form-data) for documentation purposes. +// It is initialized as required with string as its DataType. +func (w *WebService) MultiPartFormParameter(name, description string) *Parameter { + return MultiPartFormParameter(name, description) +} + +func MultiPartFormParameter(name, description string) *Parameter { + p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}} + p.beMultiPartForm() + return p +} + // Route creates a new Route using the RouteBuilder and add to the ordered list of Routes. func (w *WebService) Route(builder *RouteBuilder) *WebService { w.routesLock.Lock() @@ -176,22 +188,20 @@ func (w *WebService) Route(builder *RouteBuilder) *WebService { // RemoveRoute removes the specified route, looks for something that matches 'path' and 'method' func (w *WebService) RemoveRoute(path, method string) error { - if !w.dynamicRoutes { - return errors.New("dynamic routes are not enabled.") - } - w.routesLock.Lock() - defer w.routesLock.Unlock() - newRoutes := make([]Route, (len(w.routes) - 1)) - current := 0 - for ix := range w.routes { - if w.routes[ix].Method == method && w.routes[ix].Path == path { - continue - } - newRoutes[current] = w.routes[ix] - current++ - } - w.routes = newRoutes - return nil + if !w.dynamicRoutes { + return errors.New("dynamic routes are not enabled.") + } + w.routesLock.Lock() + defer w.routesLock.Unlock() + newRoutes := []Route{} + for _, route := range w.routes { + if route.Method == method && route.Path == path { + continue + } + newRoutes = append(newRoutes, route) + } + w.routes = newRoutes + return nil } // Method creates a new RouteBuilder and initialize its http method diff --git a/vendor/github.com/emicklei/go-restful/web_service_container.go b/vendor/github.com/emicklei/go-restful/v3/web_service_container.go similarity index 100% rename from vendor/github.com/emicklei/go-restful/web_service_container.go rename to vendor/github.com/emicklei/go-restful/v3/web_service_container.go diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go new file mode 100644 index 000000000..7accdb0c4 --- /dev/null +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -0,0 +1,787 @@ +/* +Copyright 2021 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package funcr implements formatting of structured log messages and +// optionally captures the call site and timestamp. +// +// The simplest way to use it is via its implementation of a +// github.com/go-logr/logr.LogSink with output through an arbitrary +// "write" function. See New and NewJSON for details. +// +// Custom LogSinks +// +// For users who need more control, a funcr.Formatter can be embedded inside +// your own custom LogSink implementation. This is useful when the LogSink +// needs to implement additional methods, for example. 
+// +// Formatting +// +// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for +// values which are being logged. When rendering a struct, funcr will use Go's +// standard JSON tags (all except "string"). +package funcr + +import ( + "bytes" + "encoding" + "fmt" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" +) + +// New returns a logr.Logger which is implemented by an arbitrary function. +func New(fn func(prefix, args string), opts Options) logr.Logger { + return logr.New(newSink(fn, NewFormatter(opts))) +} + +// NewJSON returns a logr.Logger which is implemented by an arbitrary function +// and produces JSON output. +func NewJSON(fn func(obj string), opts Options) logr.Logger { + fnWrapper := func(_, obj string) { + fn(obj) + } + return logr.New(newSink(fnWrapper, NewFormatterJSON(opts))) +} + +// Underlier exposes access to the underlying logging function. Since +// callers only have a logr.Logger, they have to know which +// implementation is in use, so this interface is less of an +// abstraction and more of a way to test type conversion. +type Underlier interface { + GetUnderlying() func(prefix, args string) +} + +func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink { + l := &fnlogger{ + Formatter: formatter, + write: fn, + } + // For skipping fnlogger.Info and fnlogger.Error. + l.Formatter.AddCallDepth(1) + return l +} + +// Options carries parameters which influence the way logs are generated. +type Options struct { + // LogCaller tells funcr to add a "caller" key to some or all log lines. + // This has some overhead, so some users might not want it. + LogCaller MessageClass + + // LogCallerFunc tells funcr to also log the calling function name. This + // has no effect if caller logging is not enabled (see Options.LogCaller). + LogCallerFunc bool + + // LogTimestamp tells funcr to add a "ts" key to log lines. This has some + // overhead, so some users might not want it. + LogTimestamp bool + + // TimestampFormat tells funcr how to render timestamps when LogTimestamp + // is enabled. If not specified, a default format will be used. For more + // details, see docs for Go's time.Layout. + TimestampFormat string + + // Verbosity tells funcr which V logs to produce. Higher values enable + // more logs. Info logs at or below this level will be written, while logs + // above this level will be discarded. + Verbosity int + + // RenderBuiltinsHook allows users to mutate the list of key-value pairs + // while a log line is being rendered. The kvList argument follows logr + // conventions - each pair of slice elements is comprised of a string key + // and an arbitrary value (verified and sanitized before calling this + // hook). The value returned must follow the same conventions. This hook + // can be used to audit or modify logged data. For example, you might want + // to prefix all of funcr's built-in keys with some string. This hook is + // only called for built-in (provided by funcr itself) key-value pairs. + // Equivalent hooks are offered for key-value pairs saved via + // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and + // for user-provided pairs (see RenderArgsHook). + RenderBuiltinsHook func(kvList []interface{}) []interface{} + + // RenderValuesHook is the same as RenderBuiltinsHook, except that it is + // only called for key-value pairs saved via logr.Logger.WithValues. See + // RenderBuiltinsHook for more details. 
+ RenderValuesHook func(kvList []interface{}) []interface{} + + // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only + // called for key-value pairs passed directly to Info and Error. See + // RenderBuiltinsHook for more details. + RenderArgsHook func(kvList []interface{}) []interface{} + + // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct + // that contains a struct, etc.) it may log. Every time it finds a struct, + // slice, array, or map the depth is increased by one. When the maximum is + // reached, the value will be converted to a string indicating that the max + // depth has been exceeded. If this field is not specified, a default + // value will be used. + MaxLogDepth int +} + +// MessageClass indicates which category or categories of messages to consider. +type MessageClass int + +const ( + // None ignores all message classes. + None MessageClass = iota + // All considers all message classes. + All + // Info only considers info messages. + Info + // Error only considers error messages. + Error +) + +// fnlogger inherits some of its LogSink implementation from Formatter +// and just needs to add some glue code. +type fnlogger struct { + Formatter + write func(prefix, args string) +} + +func (l fnlogger) WithName(name string) logr.LogSink { + l.Formatter.AddName(name) + return &l +} + +func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l +} + +func (l fnlogger) WithCallDepth(depth int) logr.LogSink { + l.Formatter.AddCallDepth(depth) + return &l +} + +func (l fnlogger) Info(level int, msg string, kvList ...interface{}) { + prefix, args := l.FormatInfo(level, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) Error(err error, msg string, kvList ...interface{}) { + prefix, args := l.FormatError(err, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) GetUnderlying() func(prefix, args string) { + return l.write +} + +// Assert conformance to the interfaces. +var _ logr.LogSink = &fnlogger{} +var _ logr.CallDepthLogSink = &fnlogger{} +var _ Underlier = &fnlogger{} + +// NewFormatter constructs a Formatter which emits a JSON-like key=value format. +func NewFormatter(opts Options) Formatter { + return newFormatter(opts, outputKeyValue) +} + +// NewFormatterJSON constructs a Formatter which emits strict JSON. +func NewFormatterJSON(opts Options) Formatter { + return newFormatter(opts, outputJSON) +} + +// Defaults for Options. +const defaultTimestampFormat = "2006-01-02 15:04:05.000000" +const defaultMaxLogDepth = 16 + +func newFormatter(opts Options, outfmt outputFormat) Formatter { + if opts.TimestampFormat == "" { + opts.TimestampFormat = defaultTimestampFormat + } + if opts.MaxLogDepth == 0 { + opts.MaxLogDepth = defaultMaxLogDepth + } + f := Formatter{ + outputFormat: outfmt, + prefix: "", + values: nil, + depth: 0, + opts: opts, + } + return f +} + +// Formatter is an opaque struct which can be embedded in a LogSink +// implementation. It should be constructed with NewFormatter. Some of +// its methods directly implement logr.LogSink. +type Formatter struct { + outputFormat outputFormat + prefix string + values []interface{} + valuesStr string + depth int + opts Options +} + +// outputFormat indicates which outputFormat to use. +type outputFormat int + +const ( + // outputKeyValue emits a JSON-like key=value format, but not strict JSON. + outputKeyValue outputFormat = iota + // outputJSON emits strict JSON. 
+ outputJSON ) + +// PseudoStruct is a list of key-value pairs that gets logged as a struct. +type PseudoStruct []interface{} + +// render produces a log line, ready to use. +func (f Formatter) render(builtins, args []interface{}) string { + // Empirically bytes.Buffer is faster than strings.Builder for this. + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + if f.outputFormat == outputJSON { + buf.WriteByte('{') + } + vals := builtins + if hook := f.opts.RenderBuiltinsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, false, false) // keys are ours, no need to escape + continuing := len(builtins) > 0 + if len(f.valuesStr) > 0 { + if continuing { + if f.outputFormat == outputJSON { + buf.WriteByte(',') + } else { + buf.WriteByte(' ') + } + } + continuing = true + buf.WriteString(f.valuesStr) + } + vals = args + if hook := f.opts.RenderArgsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, continuing, true) // escape user-provided keys + if f.outputFormat == outputJSON { + buf.WriteByte('}') + } + return buf.String() +} + +// flatten renders a list of key-value pairs into a buffer. If continuing is +// true, it assumes that the buffer has previous values and will emit a +// separator (which depends on the output format) before the first pair it +// writes. If escapeKeys is true, the keys are assumed to have +// non-JSON-compatible characters in them and must be evaluated for escapes. +// +// This function returns a potentially modified version of kvList, which +// ensures that there is a value for every key (adding a value if needed) and +// that each key is a string (substituting a key if needed). +func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} { + // This logic overlaps with sanitize() but saves one type-cast per key, + // which can be measurable. + if len(kvList)%2 != 0 { + kvList = append(kvList, noValue) + } + for i := 0; i < len(kvList); i += 2 { + k, ok := kvList[i].(string) + if !ok { + k = f.nonStringKey(kvList[i]) + kvList[i] = k + } + v := kvList[i+1] + + if i > 0 || continuing { + if f.outputFormat == outputJSON { + buf.WriteByte(',') + } else { + // In theory the format could be something we don't understand. In + // practice, we control it, so it won't be. + buf.WriteByte(' ') + } + } + + if escapeKeys { + buf.WriteString(prettyString(k)) + } else { + // this is faster + buf.WriteByte('"') + buf.WriteString(k) + buf.WriteByte('"') + } + if f.outputFormat == outputJSON { + buf.WriteByte(':') + } else { + buf.WriteByte('=') + } + buf.WriteString(f.pretty(v)) + } + return kvList +} + +func (f Formatter) pretty(value interface{}) string { + return f.prettyWithFlags(value, 0, 0) +} + +const ( + flagRawStruct = 0x1 // do not print braces on structs +) + +// TODO: This is not fast. Most of the overhead goes here. +func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string { + if depth > f.opts.MaxLogDepth { + return `"<max-log-depth-exceeded>"` + } + + // Handle types that take full control of logging. + if v, ok := value.(logr.Marshaler); ok { + // Replace the value with what the type wants to get logged. + // That then gets handled below via reflection. + value = invokeMarshaler(v) + } + + // Handle types that want to format themselves. + switch v := value.(type) { + case fmt.Stringer: + value = invokeStringer(v) + case error: + value = invokeError(v) + } + + // Handling the most common types without reflect is a small perf win.
+ switch v := value.(type) { + case bool: + return strconv.FormatBool(v) + case string: + return prettyString(v) + case int: + return strconv.FormatInt(int64(v), 10) + case int8: + return strconv.FormatInt(int64(v), 10) + case int16: + return strconv.FormatInt(int64(v), 10) + case int32: + return strconv.FormatInt(int64(v), 10) + case int64: + return strconv.FormatInt(int64(v), 10) + case uint: + return strconv.FormatUint(uint64(v), 10) + case uint8: + return strconv.FormatUint(uint64(v), 10) + case uint16: + return strconv.FormatUint(uint64(v), 10) + case uint32: + return strconv.FormatUint(uint64(v), 10) + case uint64: + return strconv.FormatUint(v, 10) + case uintptr: + return strconv.FormatUint(uint64(v), 10) + case float32: + return strconv.FormatFloat(float64(v), 'f', -1, 32) + case float64: + return strconv.FormatFloat(v, 'f', -1, 64) + case complex64: + return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"` + case complex128: + return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"` + case PseudoStruct: + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + v = f.sanitize(v) + if flags&flagRawStruct == 0 { + buf.WriteByte('{') + } + for i := 0; i < len(v); i += 2 { + if i > 0 { + buf.WriteByte(',') + } + k, _ := v[i].(string) // sanitize() above means no need to check success + // arbitrary keys might need escaping + buf.WriteString(prettyString(k)) + buf.WriteByte(':') + buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1)) + } + if flags&flagRawStruct == 0 { + buf.WriteByte('}') + } + return buf.String() + } + + buf := bytes.NewBuffer(make([]byte, 0, 256)) + t := reflect.TypeOf(value) + if t == nil { + return "null" + } + v := reflect.ValueOf(value) + switch t.Kind() { + case reflect.Bool: + return strconv.FormatBool(v.Bool()) + case reflect.String: + return prettyString(v.String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(int64(v.Int()), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return strconv.FormatUint(uint64(v.Uint()), 10) + case reflect.Float32: + return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32) + case reflect.Float64: + return strconv.FormatFloat(v.Float(), 'f', -1, 64) + case reflect.Complex64: + return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"` + case reflect.Complex128: + return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"` + case reflect.Struct: + if flags&flagRawStruct == 0 { + buf.WriteByte('{') + } + for i := 0; i < t.NumField(); i++ { + fld := t.Field(i) + if fld.PkgPath != "" { + // reflect says this field is only defined for non-exported fields. + continue + } + if !v.Field(i).CanInterface() { + // reflect isn't clear exactly what this means, but we can't use it. 
+ continue + } + name := "" + omitempty := false + if tag, found := fld.Tag.Lookup("json"); found { + if tag == "-" { + continue + } + if comma := strings.Index(tag, ","); comma != -1 { + if n := tag[:comma]; n != "" { + name = n + } + rest := tag[comma:] + if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") { + omitempty = true + } + } else { + name = tag + } + } + if omitempty && isEmpty(v.Field(i)) { + continue + } + if i > 0 { + buf.WriteByte(',') + } + if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" { + buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1)) + continue + } + if name == "" { + name = fld.Name + } + // field names can't contain characters which need escaping + buf.WriteByte('"') + buf.WriteString(name) + buf.WriteByte('"') + buf.WriteByte(':') + buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1)) + } + if flags&flagRawStruct == 0 { + buf.WriteByte('}') + } + return buf.String() + case reflect.Slice, reflect.Array: + buf.WriteByte('[') + for i := 0; i < v.Len(); i++ { + if i > 0 { + buf.WriteByte(',') + } + e := v.Index(i) + buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1)) + } + buf.WriteByte(']') + return buf.String() + case reflect.Map: + buf.WriteByte('{') + // This does not sort the map keys, for best perf. + it := v.MapRange() + i := 0 + for it.Next() { + if i > 0 { + buf.WriteByte(',') + } + // If a map key supports TextMarshaler, use it. + keystr := "" + if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok { + txt, err := m.MarshalText() + if err != nil { + keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error()) + } else { + keystr = string(txt) + } + keystr = prettyString(keystr) + } else { + // prettyWithFlags will produce already-escaped values + keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1) + if t.Key().Kind() != reflect.String { + // JSON only does string keys. Unlike Go's standard JSON, we'll + // convert just about anything to a string. + keystr = prettyString(keystr) + } + } + buf.WriteString(keystr) + buf.WriteByte(':') + buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1)) + i++ + } + buf.WriteByte('}') + return buf.String() + case reflect.Ptr, reflect.Interface: + if v.IsNil() { + return "null" + } + return f.prettyWithFlags(v.Elem().Interface(), 0, depth) + } + return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String()) +} + +func prettyString(s string) string { + // Avoid escaping (which does allocations) if we can. + if needsEscape(s) { + return strconv.Quote(s) + } + b := bytes.NewBuffer(make([]byte, 0, 1024)) + b.WriteByte('"') + b.WriteString(s) + b.WriteByte('"') + return b.String() +} + +// needsEscape determines whether the input string needs to be escaped or not, +// without doing any allocations.
+func needsEscape(s string) bool { + for _, r := range s { + if !strconv.IsPrint(r) || r == '\\' || r == '"' { + return true + } + } + return false +} + +func isEmpty(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Complex64, reflect.Complex128: + return v.Complex() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func invokeMarshaler(m logr.Marshaler) (ret interface{}) { + defer func() { + if r := recover(); r != nil { + ret = fmt.Sprintf("<panic: %s>", r) + } + }() + return m.MarshalLog() +} + +func invokeStringer(s fmt.Stringer) (ret string) { + defer func() { + if r := recover(); r != nil { + ret = fmt.Sprintf("<panic: %s>", r) + } + }() + return s.String() +} + +func invokeError(e error) (ret string) { + defer func() { + if r := recover(); r != nil { + ret = fmt.Sprintf("<panic: %s>", r) + } + }() + return e.Error() +} + +// Caller represents the original call site for a log line, after considering +// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and +// Line fields will always be provided, while the Func field is optional. +// Users can set the render hook fields in Options to examine logged key-value +// pairs, one of which will be {"caller", Caller} if the Options.LogCaller +// field is enabled for the given MessageClass. +type Caller struct { + // File is the basename of the file for this call site. + File string `json:"file"` + // Line is the line number in the file for this call site. + Line int `json:"line"` + // Func is the function name for this call site, or empty if + // Options.LogCallerFunc is not enabled. + Func string `json:"function,omitempty"` +} + +func (f Formatter) caller() Caller { + // +1 for this frame, +1 for Info/Error. + pc, file, line, ok := runtime.Caller(f.depth + 2) + if !ok { + return Caller{"", 0, ""} + } + fn := "" + if f.opts.LogCallerFunc { + if fp := runtime.FuncForPC(pc); fp != nil { + fn = fp.Name() + } + } + + return Caller{filepath.Base(file), line, fn} +} + +const noValue = "<no-value>" + +func (f Formatter) nonStringKey(v interface{}) string { + return fmt.Sprintf("<non-string-key: %s>", f.snippet(v)) +} + +// snippet produces a short snippet string of an arbitrary value. +func (f Formatter) snippet(v interface{}) string { + const snipLen = 16 + + snip := f.pretty(v) + if len(snip) > snipLen { + snip = snip[:snipLen] + } + return snip +} + +// sanitize ensures that a list of key-value pairs has a value for every key +// (adding a value if needed) and that each key is a string (substituting a key +// if needed). +func (f Formatter) sanitize(kvList []interface{}) []interface{} { + if len(kvList)%2 != 0 { + kvList = append(kvList, noValue) + } + for i := 0; i < len(kvList); i += 2 { + _, ok := kvList[i].(string) + if !ok { + kvList[i] = f.nonStringKey(kvList[i]) + } + } + return kvList +} + +// Init configures this Formatter from runtime info, such as the call depth +// imposed by logr itself. +// Note that this receiver is a pointer, so depth can be saved. +func (f *Formatter) Init(info logr.RuntimeInfo) { + f.depth += info.CallDepth +} + +// Enabled checks whether an info message at the given level should be logged.
+func (f Formatter) Enabled(level int) bool { + return level <= f.opts.Verbosity +} + +// GetDepth returns the current depth of this Formatter. This is useful for +// implementations which do their own caller attribution. +func (f Formatter) GetDepth() int { + return f.depth +} + +// FormatInfo renders an Info log message into strings. The prefix will be +// empty when no names were set (via AddNames), or when the output is +// configured for JSON. +func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) { + args := make([]interface{}, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) + prefix = "" + } + if f.opts.LogTimestamp { + args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) + } + if policy := f.opts.LogCaller; policy == All || policy == Info { + args = append(args, "caller", f.caller()) + } + args = append(args, "level", level, "msg", msg) + return prefix, f.render(args, kvList) +} + +// FormatError renders an Error log message into strings. The prefix will be +// empty when no names were set (via AddNames), or when the output is +// configured for JSON. +func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) { + args := make([]interface{}, 0, 64) // using a constant here impacts perf + prefix = f.prefix + if f.outputFormat == outputJSON { + args = append(args, "logger", prefix) + prefix = "" + } + if f.opts.LogTimestamp { + args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) + } + if policy := f.opts.LogCaller; policy == All || policy == Error { + args = append(args, "caller", f.caller()) + } + args = append(args, "msg", msg) + var loggableErr interface{} + if err != nil { + loggableErr = err.Error() + } + args = append(args, "error", loggableErr) + return f.prefix, f.render(args, kvList) +} + +// AddName appends the specified name. funcr uses '/' characters to separate +// name elements. Callers should not pass '/' in the provided name string, but +// this library does not actually enforce that. +func (f *Formatter) AddName(name string) { + if len(f.prefix) > 0 { + f.prefix += "/" + } + f.prefix += name +} + +// AddValues adds key-value pairs to the set of saved values to be logged with +// each log line. +func (f *Formatter) AddValues(kvList []interface{}) { + // Three slice args forces a copy. + n := len(f.values) + f.values = append(f.values[:n:n], kvList...) + + vals := f.values + if hook := f.opts.RenderValuesHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + + // Pre-render values, so we don't have to do it on each Info/Error call. + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + f.flatten(buf, vals, false, true) // escape user-provided keys + f.valuesStr = buf.String() +} + +// AddCallDepth increases the number of stack-frames to skip when attributing +// the log line to a file and line. 
+func (f *Formatter) AddCallDepth(depth int) { + f.depth += depth +} diff --git a/vendor/github.com/go-openapi/jsonpointer/.travis.yml b/vendor/github.com/go-openapi/jsonpointer/.travis.yml deleted file mode 100644 index 03a22fe06..000000000 --- a/vendor/github.com/go-openapi/jsonpointer/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.14.x -- 1.15.x -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: a5VgoiwB1G/AZqzmephPZIhEB9avMlsWSlVnM1dSAtYAwdrQHGTQxAmpOxYIoSPDhWNN5bfZmjd29++UlTwLcHSR+e0kJhH6IfDlsHj/HplNCJ9tyI0zYc7XchtdKgeMxMzBKCzgwFXGSbQGydXTliDNBo0HOzmY3cou/daMFTP60K+offcjS+3LRAYb1EroSRXZqrk1nuF/xDL3792DZUdPMiFR/L/Df6y74D6/QP4sTkTDFQitz4Wy/7jbsfj8dG6qK2zivgV6/l+w4OVjFkxVpPXogDWY10vVXNVynqxfJ7to2d1I9lNCHE2ilBCkWMIPdyJF7hjF8pKW+82yP4EzRh0vu8Xn0HT5MZpQxdRY/YMxNrWaG7SxsoEaO4q5uhgdzAqLYY3TRa7MjIK+7Ur+aqOeTXn6OKwVi0CjvZ6mIU3WUKSwiwkFZMbjRAkSb5CYwMEfGFO/z964xz83qGt6WAtBXNotqCQpTIiKtDHQeLOMfksHImCg6JLhQcWBVxamVgu0G3Pdh8Y6DyPnxraXY95+QDavbjqv7TeYT9T/FNnrkXaTTK0s4iWE5H4ACU0Qvz0wUYgfQrZv0/Hp7V17+rabUwnzYySHCy9SWX/7OV9Cfh31iMp9ZIffr76xmmThtOEqs8TrTtU6BWI3rWwvA9cXQipZTVtL0oswrGw= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml index f9381aee5..013fc1943 100644 --- a/vendor/github.com/go-openapi/jsonreference/.golangci.yml +++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml @@ -1,8 +1,6 @@ linters-settings: govet: check-shadowing: true - golint: - min-confidence: 0 gocyclo: min-complexity: 30 maligned: @@ -12,6 +10,8 @@ linters-settings: goconst: min-len: 2 min-occurrences: 4 + paralleltest: + ignore-missing: true linters: enable-all: true disable: @@ -39,3 +39,12 @@ linters: - nestif - godot - errorlint + - varcheck + - interfacer + - deadcode + - golint + - ifshort + - structcheck + - nosnakecase + - varnamelen + - exhaustruct diff --git a/vendor/github.com/go-openapi/jsonreference/.travis.yml b/vendor/github.com/go-openapi/jsonreference/.travis.yml deleted file mode 100644 index 05482f4b9..000000000 --- a/vendor/github.com/go-openapi/jsonreference/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.14.x -- 1.x -install: -- go get gotest.tools/gotestsum -jobs: - include: - # include linting job, but only for latest go version and amd64 arch - - go: 1.x - arch: amd64 - install: - go get github.com/golangci/golangci-lint/cmd/golangci-lint - script: - - golangci-lint run --new-from-rev master -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: OpQG/36F7DSF00HLm9WZMhyqFCYYyYTsVDObW226cWiR8PWYiNfLZiSEvIzT1Gx4dDjhigKTIqcLhG34CkL5iNXDjm9Yyo2RYhQPlK8NErNqUEXuBqn4RqYHW48VGhEhOyDd4Ei0E2FN5ZbgpvHgtpkdZ6XDi64r3Ac89isP9aPHXQTuv2Jog6b4/OKKiUTftLcTIst0p4Cp3gqOJWf1wnoj+IadWiECNVQT6zb47IYjtyw6+uV8iUjTzdKcRB6Zc6b4Dq7JAg1Zd7Jfxkql3hlKp4PNlRf9Cy7y5iA3G7MLyg3FcPX5z2kmcyPt2jOTRMBWUJ5zIQpOxizAcN8WsT3WWBL5KbuYK6k0PzujrIDLqdxGpNmjkkMfDBT9cKmZpm2FdW+oZgPFJP+oKmAo4u4KJz/vjiPTXgQlN5bmrLuRMCp+AwC5wkIohTqWZVPE2TK6ZSnMYcg/W39s+RP/9mJoyryAvPSpBOLTI+biCgaUCTOAZxNTWpMFc3tPYntc41WWkdKcooZ9JA5DwfcaVFyTGQ3YXz+HvX6G1z/gW0Q/A4dBi9mj2iE1xm7tRTT+4VQ2AXFvSEI1HJpfPgYnwAtwOD1v3Qm2EUHk9sCdtEDR4wVGEPIVn44GnwFMnGKx9JWppMPYwFu3SVDdHt+E+LOlhZUply11Aa+IVrT2KUQ= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt 
-covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go new file mode 100644 index 000000000..fb376fce2 --- /dev/null +++ b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go @@ -0,0 +1,64 @@ +package internal + +import ( + "net/url" + "regexp" + "strings" +) + +const ( + defaultHTTPPort = ":80" + defaultHTTPSPort = ":443" +) + +// Regular expressions used by the normalizations +var rxPort = regexp.MustCompile(`(:\d+)/?$`) +var rxDupSlashes = regexp.MustCompile(`/{2,}`) + +// NormalizeURL will normalize the specified URL +// This was added to replace a previous call to the no longer maintained purell library: +// The call that was used looked like the following: +// +// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) +// +// To explain all that was included in the call above, purell.FlagsSafe was really just the following: +// - FlagLowercaseScheme +// - FlagLowercaseHost +// - FlagRemoveDefaultPort +// - FlagRemoveDuplicateSlashes (and this was mixed in with the |) +func NormalizeURL(u *url.URL) { + lowercaseScheme(u) + lowercaseHost(u) + removeDefaultPort(u) + removeDuplicateSlashes(u) +} + +func lowercaseScheme(u *url.URL) { + if len(u.Scheme) > 0 { + u.Scheme = strings.ToLower(u.Scheme) + } +} + +func lowercaseHost(u *url.URL) { + if len(u.Host) > 0 { + u.Host = strings.ToLower(u.Host) + } +} + +func removeDefaultPort(u *url.URL) { + if len(u.Host) > 0 { + scheme := strings.ToLower(u.Scheme) + u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { + if (scheme == "http" && val == defaultHTTPPort) || (scheme == "https" && val == defaultHTTPSPort) { + return "" + } + return val + }) + } +} + +func removeDuplicateSlashes(u *url.URL) { + if len(u.Path) > 0 { + u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") + } +} diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go index 3bc0a6e26..cfdef03e5 100644 --- a/vendor/github.com/go-openapi/jsonreference/reference.go +++ b/vendor/github.com/go-openapi/jsonreference/reference.go @@ -30,8 +30,8 @@ import ( "net/url" "strings" - "github.com/PuerkitoBio/purell" "github.com/go-openapi/jsonpointer" + "github.com/go-openapi/jsonreference/internal" ) const ( @@ -114,7 +114,9 @@ func (r *Ref) parse(jsonReferenceString string) error { return err } - r.referenceURL, _ = url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) + internal.NormalizeURL(parsed) + + r.referenceURL = parsed refURL := r.referenceURL if refURL.Scheme != "" && refURL.Host != "" { diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml index 2a4a71f3a..bf503e400 100644 --- a/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -48,3 +48,7 @@ linters: - goimports - tenv - golint + - exhaustruct + - nilnil + - nonamedreturns + - nosnakecase diff --git a/vendor/github.com/go-openapi/swag/doc.go b/vendor/github.com/go-openapi/swag/doc.go index 8d2c8c501..55094cb74 100644 --- a/vendor/github.com/go-openapi/swag/doc.go +++ b/vendor/github.com/go-openapi/swag/doc.go @@ -17,16 +17,15 @@ Package swag contains a bunch of helper functions for go-openapi and go-swagger You may also use it standalone for your projects. 
- * convert between value and pointers for builtin types - * convert from string to builtin types (wraps strconv) - * fast json concatenation - * search in path - * load from file or http - * name mangling - + - convert between value and pointers for builtin types + - convert from string to builtin types (wraps strconv) + - fast json concatenation + - search in path + - load from file or http + - name mangling This repo has only few dependencies outside of the standard library: - * YAML utilities depend on gopkg.in/yaml.v2 + - YAML utilities depend on gopkg.in/yaml.v2 */ package swag diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go index 9a6040972..00038c377 100644 --- a/vendor/github.com/go-openapi/swag/loading.go +++ b/vendor/github.com/go-openapi/swag/loading.go @@ -16,10 +16,11 @@ package swag import ( "fmt" - "io/ioutil" + "io" "log" "net/http" "net/url" + "os" "path/filepath" "runtime" "strings" @@ -40,13 +41,13 @@ var LoadHTTPCustomHeaders = map[string]string{} // LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in func LoadFromFileOrHTTP(path string) ([]byte, error) { - return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) + return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) } // LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in // timeout arg allows for per request overriding of the request timeout func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) { - return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(timeout))(path) + return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path) } // LoadStrategy returns a loader function for a given path or uri @@ -86,7 +87,7 @@ func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func( func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) { return func(path string) ([]byte, error) { client := &http.Client{Timeout: timeout} - req, err := http.NewRequest("GET", path, nil) // nolint: noctx + req, err := http.NewRequest(http.MethodGet, path, nil) //nolint:noctx if err != nil { return nil, err } @@ -115,6 +116,6 @@ func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) { return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status) } - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } } diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index 193702f2c..f78ab684a 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -99,10 +99,11 @@ const ( ) // JoinByFormat joins a string array by a known format (e.g. 
swagger's collectionFormat attribute): -// ssv: space separated value -// tsv: tab separated value -// pipes: pipe (|) separated value -// csv: comma separated value (default) +// +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) func JoinByFormat(data []string, format string) []string { if len(data) == 0 { return data @@ -124,11 +125,11 @@ func JoinByFormat(data []string, format string) []string { } // SplitByFormat splits a string by a known format: -// ssv: space separated value -// tsv: tab separated value -// pipes: pipe (|) separated value -// csv: comma separated value (default) // +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) func SplitByFormat(data, format string) []string { if data == "" { return nil diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go index ec9691440..f09ee609f 100644 --- a/vendor/github.com/go-openapi/swag/yaml.go +++ b/vendor/github.com/go-openapi/swag/yaml.go @@ -22,7 +22,7 @@ import ( "github.com/mailru/easyjson/jlexer" "github.com/mailru/easyjson/jwriter" - yaml "gopkg.in/yaml.v2" + yaml "gopkg.in/yaml.v3" ) // YAMLMatcher matches yaml @@ -43,16 +43,126 @@ func YAMLToJSON(data interface{}) (json.RawMessage, error) { // BytesToYAMLDoc converts a byte slice into a YAML document func BytesToYAMLDoc(data []byte) (interface{}, error) { - var canary map[interface{}]interface{} // validate this is an object and not a different type - if err := yaml.Unmarshal(data, &canary); err != nil { + var document yaml.Node // preserve order that is present in the document + if err := yaml.Unmarshal(data, &document); err != nil { return nil, err } + if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode { + return nil, fmt.Errorf("only YAML documents that are objects are supported") + } + return &document, nil +} - var document yaml.MapSlice // preserve order that is present in the document - if err := yaml.Unmarshal(data, &document); err != nil { - return nil, err +func yamlNode(root *yaml.Node) (interface{}, error) { + switch root.Kind { + case yaml.DocumentNode: + return yamlDocument(root) + case yaml.SequenceNode: + return yamlSequence(root) + case yaml.MappingNode: + return yamlMapping(root) + case yaml.ScalarNode: + return yamlScalar(root) + case yaml.AliasNode: + return yamlNode(root.Alias) + default: + return nil, fmt.Errorf("unsupported YAML node type: %v", root.Kind) + } +} + +func yamlDocument(node *yaml.Node) (interface{}, error) { + if len(node.Content) != 1 { + return nil, fmt.Errorf("unexpected YAML Document node content length: %d", len(node.Content)) + } + return yamlNode(node.Content[0]) +} + +func yamlMapping(node *yaml.Node) (interface{}, error) { + m := make(JSONMapSlice, len(node.Content)/2) + + var j int + for i := 0; i < len(node.Content); i += 2 { + var nmi JSONMapItem + k, err := yamlStringScalarC(node.Content[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode YAML map key: %w", err) + } + nmi.Key = k + v, err := yamlNode(node.Content[i+1]) + if err != nil { + return nil, fmt.Errorf("unable to process YAML map value for key %q: %w", k, err) + } + nmi.Value = v + m[j] = nmi + j++ + } + return m, nil +} + +func yamlSequence(node *yaml.Node) (interface{}, error) { + s := make([]interface{}, 0) + + for i := 0; i < len(node.Content); i++ { + + v, err := 
yamlNode(node.Content[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode YAML sequence value: %w", err) + } + s = append(s, v) + } + return s, nil +} + +const ( // See https://yaml.org/type/ + yamlStringScalar = "tag:yaml.org,2002:str" + yamlIntScalar = "tag:yaml.org,2002:int" + yamlBoolScalar = "tag:yaml.org,2002:bool" + yamlFloatScalar = "tag:yaml.org,2002:float" + yamlTimestamp = "tag:yaml.org,2002:timestamp" + yamlNull = "tag:yaml.org,2002:null" +) + +func yamlScalar(node *yaml.Node) (interface{}, error) { + switch node.LongTag() { + case yamlStringScalar: + return node.Value, nil + case yamlBoolScalar: + b, err := strconv.ParseBool(node.Value) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting bool content: %w", node.Value, err) + } + return b, nil + case yamlIntScalar: + i, err := strconv.ParseInt(node.Value, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting integer content: %w", node.Value, err) + } + return i, nil + case yamlFloatScalar: + f, err := strconv.ParseFloat(node.Value, 64) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting float content: %w", node.Value, err) + } + return f, nil + case yamlTimestamp: + return node.Value, nil + case yamlNull: + return nil, nil + default: + return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag()) + } +} + +func yamlStringScalarC(node *yaml.Node) (string, error) { + if node.Kind != yaml.ScalarNode { + return "", fmt.Errorf("expecting a string scalar but got %q", node.Kind) + } + switch node.LongTag() { + case yamlStringScalar, yamlIntScalar, yamlFloatScalar: + return node.Value, nil + default: + return "", fmt.Errorf("YAML tag %q is not supported as map key", node.LongTag()) } - return document, nil } // JSONMapSlice represent a JSON object, with the order of keys maintained @@ -105,6 +215,113 @@ func (s *JSONMapSlice) UnmarshalEasyJSON(in *jlexer.Lexer) { *s = result } +func (s JSONMapSlice) MarshalYAML() (interface{}, error) { + var n yaml.Node + n.Kind = yaml.DocumentNode + var nodes []*yaml.Node + for _, item := range s { + nn, err := json2yaml(item.Value) + if err != nil { + return nil, err + } + ns := []*yaml.Node{ + { + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: item.Key, + }, + nn, + } + nodes = append(nodes, ns...) 
+ } + + n.Content = []*yaml.Node{ + { + Kind: yaml.MappingNode, + Content: nodes, + }, + } + + return yaml.Marshal(&n) +} + +func json2yaml(item interface{}) (*yaml.Node, error) { + switch val := item.(type) { + case JSONMapSlice: + var n yaml.Node + n.Kind = yaml.MappingNode + for i := range val { + childNode, err := json2yaml(&val[i].Value) + if err != nil { + return nil, err + } + n.Content = append(n.Content, &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: val[i].Key, + }, childNode) + } + return &n, nil + case map[string]interface{}: + var n yaml.Node + n.Kind = yaml.MappingNode + for k, v := range val { + childNode, err := json2yaml(v) + if err != nil { + return nil, err + } + n.Content = append(n.Content, &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: k, + }, childNode) + } + return &n, nil + case []interface{}: + var n yaml.Node + n.Kind = yaml.SequenceNode + for i := range val { + childNode, err := json2yaml(val[i]) + if err != nil { + return nil, err + } + n.Content = append(n.Content, childNode) + } + return &n, nil + case string: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: val, + }, nil + case float64: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlFloatScalar, + Value: strconv.FormatFloat(val, 'f', -1, 64), + }, nil + case int64: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlIntScalar, + Value: strconv.FormatInt(val, 10), + }, nil + case uint64: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlIntScalar, + Value: strconv.FormatUint(val, 10), + }, nil + case bool: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlBoolScalar, + Value: strconv.FormatBool(val), + }, nil + } + return nil, nil +} + // JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice type JSONMapItem struct { Key string @@ -173,23 +390,10 @@ func transformData(input interface{}) (out interface{}, err error) { } switch in := input.(type) { - case yaml.MapSlice: - - o := make(JSONMapSlice, len(in)) - for i, mi := range in { - var nmi JSONMapItem - if nmi.Key, err = format(mi.Key); err != nil { - return nil, err - } - - v, ert := transformData(mi.Value) - if ert != nil { - return nil, ert - } - nmi.Value = v - o[i] = nmi - } - return o, nil + case yaml.Node: + return yamlNode(&in) + case *yaml.Node: + return yamlNode(in) case map[interface{}]interface{}: o := make(JSONMapSlice, 0, len(in)) for ke, va := range in { diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index 86d0903b8..087320da7 100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -13,21 +13,21 @@ // // The primary features of cmp are: // -// • When the default behavior of equality does not suit the needs of the test, -// custom equality functions can override the equality operation. -// For example, an equality function may report floats as equal so long as they -// are within some tolerance of each other. +// - When the default behavior of equality does not suit the test's needs, +// custom equality functions can override the equality operation. +// For example, an equality function may report floats as equal so long as +// they are within some tolerance of each other. // -// • Types that have an Equal method may use that method to determine equality. -// This allows package authors to determine the equality operation for the types -// that they define. 
+// - Types with an Equal method may use that method to determine equality. +// This allows package authors to determine the equality operation +// for the types that they define. // -// • If no custom equality functions are used and no Equal method is defined, -// equality is determined by recursively comparing the primitive kinds on both -// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported -// fields are not compared by default; they result in panics unless suppressed -// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly -// compared using the Exporter option. +// - If no custom equality functions are used and no Equal method is defined, +// equality is determined by recursively comparing the primitive kinds on +// both values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, +// unexported fields are not compared by default; they result in panics +// unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported) +// or explicitly compared using the Exporter option. package cmp import ( @@ -36,33 +36,34 @@ import ( "strings" "github.com/google/go-cmp/cmp/internal/diff" - "github.com/google/go-cmp/cmp/internal/flags" "github.com/google/go-cmp/cmp/internal/function" "github.com/google/go-cmp/cmp/internal/value" ) +// TODO(≥go1.18): Use any instead of interface{}. + // Equal reports whether x and y are equal by recursively applying the // following rules in the given order to x and y and all of their sub-values: // -// • Let S be the set of all Ignore, Transformer, and Comparer options that -// remain after applying all path filters, value filters, and type filters. -// If at least one Ignore exists in S, then the comparison is ignored. -// If the number of Transformer and Comparer options in S is greater than one, -// then Equal panics because it is ambiguous which option to use. -// If S contains a single Transformer, then use that to transform the current -// values and recursively call Equal on the output values. -// If S contains a single Comparer, then use that to compare the current values. -// Otherwise, evaluation proceeds to the next rule. +// - Let S be the set of all Ignore, Transformer, and Comparer options that +// remain after applying all path filters, value filters, and type filters. +// If at least one Ignore exists in S, then the comparison is ignored. +// If the number of Transformer and Comparer options in S is non-zero, +// then Equal panics because it is ambiguous which option to use. +// If S contains a single Transformer, then use that to transform +// the current values and recursively call Equal on the output values. +// If S contains a single Comparer, then use that to compare the current values. +// Otherwise, evaluation proceeds to the next rule. // -// • If the values have an Equal method of the form "(T) Equal(T) bool" or -// "(T) Equal(I) bool" where T is assignable to I, then use the result of -// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and -// evaluation proceeds to the next rule. +// - If the values have an Equal method of the form "(T) Equal(T) bool" or +// "(T) Equal(I) bool" where T is assignable to I, then use the result of +// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and +// evaluation proceeds to the next rule. // -// • Lastly, try to compare x and y based on their basic kinds. -// Simple kinds like booleans, integers, floats, complex numbers, strings, and -// channels are compared using the equivalent of the == operator in Go. 
-// Functions are only equal if they are both nil, otherwise they are unequal. +// - Lastly, try to compare x and y based on their basic kinds. +// Simple kinds like booleans, integers, floats, complex numbers, strings, +// and channels are compared using the equivalent of the == operator in Go. +// Functions are only equal if they are both nil, otherwise they are unequal. // // Structs are equal if recursively calling Equal on all fields report equal. // If a struct contains unexported fields, Equal panics unless an Ignore option @@ -143,7 +144,7 @@ func rootStep(x, y interface{}) PathStep { // so that they have the same parent type. var t reflect.Type if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { - t = reflect.TypeOf((*interface{})(nil)).Elem() + t = anyType if vx.IsValid() { vvx := reflect.New(t).Elem() vvx.Set(vx) @@ -319,7 +320,6 @@ func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { } func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { - v = sanitizeValue(v, f.Type().In(0)) if !s.dynChecker.Next() { return f.Call([]reflect.Value{v})[0] } @@ -343,8 +343,6 @@ func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { } func (s *state) callTTBFunc(f, x, y reflect.Value) bool { - x = sanitizeValue(x, f.Type().In(0)) - y = sanitizeValue(y, f.Type().In(1)) if !s.dynChecker.Next() { return f.Call([]reflect.Value{x, y})[0].Bool() } @@ -372,19 +370,6 @@ func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { ret = f.Call(vs)[0] } -// sanitizeValue converts nil interfaces of type T to those of type R, -// assuming that T is assignable to R. -// Otherwise, it returns the input value as is. -func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { - // TODO(≥go1.10): Workaround for reflect bug (https://golang.org/issue/22143). - if !flags.AtLeastGo110 { - if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { - return reflect.New(t).Elem() - } - } - return v -} - func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { var addr bool var vax, vay reflect.Value // Addressable versions of vx and vy @@ -654,7 +639,9 @@ type dynChecker struct{ curr, next int } // Next increments the state and reports whether a check should be performed. // // Checks occur every Nth function call, where N is a triangular number: +// // 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ... +// // See https://en.wikipedia.org/wiki/Triangular_number // // This sequence ensures that the cost of checks drops significantly as diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go index 5ff0b4218..ae851fe53 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_panic.go +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego // +build purego package cmp diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go index 21eb54858..e2c0f74e8 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
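For readers skimming the reworked compare.go documentation above, here is a minimal sketch of those Equal rules in action. It is not part of the vendored sources and uses only the public cmp API (cmp.Equal, cmp.Diff, cmp.Comparer); the Point type is a made-up example:

```go
package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

type Point struct{ X, Y float64 }

func main() {
	// Per the Equal rules above: a Comparer in scope overrides the default
	// kind-by-kind comparison. It must be symmetric, deterministic, and pure.
	approx := cmp.Comparer(func(a, b float64) bool {
		return math.Abs(a-b) < 1e-9
	})

	x := Point{X: 1, Y: 2}
	y := Point{X: 1, Y: 2 + 1e-12}

	fmt.Println(cmp.Equal(x, y))         // false: plain == on the float64 leaves
	fmt.Println(cmp.Equal(x, y, approx)) // true: the Comparer absorbs the drift
}
```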
+//go:build !purego // +build !purego package cmp diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go index 1daaaacc5..36062a604 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !cmp_debug // +build !cmp_debug package diff diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go index 4b91dbcac..a3b97a1ad 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build cmp_debug // +build cmp_debug package diff diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go index bc196b16c..a248e5436 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -127,9 +127,9 @@ var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 // This function returns an edit-script, which is a sequence of operations // needed to convert one list into the other. The following invariants for // the edit-script are maintained: -// • eq == (es.Dist()==0) -// • nx == es.LenX() -// • ny == es.LenY() +// - eq == (es.Dist()==0) +// - nx == es.LenX() +// - ny == es.LenY() // // This algorithm is not guaranteed to be an optimal solution (i.e., one that // produces an edit-script with a minimal Levenshtein distance). This algorithm @@ -169,12 +169,13 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // A diagonal edge is equivalent to a matching symbol between both X and Y. // Invariants: - // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx - // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny + // - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx + // - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny // // In general: - // • fwdFrontier.X < revFrontier.X - // • fwdFrontier.Y < revFrontier.Y + // - fwdFrontier.X < revFrontier.X + // - fwdFrontier.Y < revFrontier.Y + // // Unless, it is time for the algorithm to terminate. fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)} revPath := path{-1, point{nx, ny}, make(EditScript, 0)} @@ -195,19 +196,21 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // computing sub-optimal edit-scripts between two lists. // // The algorithm is approximately as follows: - // • Searching for differences switches back-and-forth between - // a search that starts at the beginning (the top-left corner), and - // a search that starts at the end (the bottom-right corner). The goal of - // the search is connect with the search from the opposite corner. - // • As we search, we build a path in a greedy manner, where the first - // match seen is added to the path (this is sub-optimal, but provides a - // decent result in practice). When matches are found, we try the next pair - // of symbols in the lists and follow all matches as far as possible. 
- // • When searching for matches, we search along a diagonal going through - // through the "frontier" point. If no matches are found, we advance the - // frontier towards the opposite corner. - // • This algorithm terminates when either the X coordinates or the - // Y coordinates of the forward and reverse frontier points ever intersect. + // - Searching for differences switches back-and-forth between + // a search that starts at the beginning (the top-left corner), and + // a search that starts at the end (the bottom-right corner). + // The goal of the search is connect with the search + // from the opposite corner. + // - As we search, we build a path in a greedy manner, + // where the first match seen is added to the path (this is sub-optimal, + // but provides a decent result in practice). When matches are found, + // we try the next pair of symbols in the lists and follow all matches + // as far as possible. + // - When searching for matches, we search along a diagonal going through + // through the "frontier" point. If no matches are found, + // we advance the frontier towards the opposite corner. + // - This algorithm terminates when either the X coordinates or the + // Y coordinates of the forward and reverse frontier points ever intersect. // This algorithm is correct even if searching only in the forward direction // or in the reverse direction. We do both because it is commonly observed @@ -389,6 +392,7 @@ type point struct{ X, Y int } func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } // zigzag maps a consecutive sequence of integers to a zig-zag sequence. +// // [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] func zigzag(x int) int { if x&1 != 0 { diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go deleted file mode 100644 index 82d1d7fbf..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.10 - -package flags - -// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. -const AtLeastGo110 = false diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go deleted file mode 100644 index 8646f0529..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.10 - -package flags - -// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. -const AtLeastGo110 = true diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go index b6c12cefb..7b498bb2c 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go @@ -9,6 +9,8 @@ import ( "strconv" ) +var anyType = reflect.TypeOf((*interface{})(nil)).Elem() + // TypeString is nearly identical to reflect.Type.String, // but has an additional option to specify that full type names be used. 
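The zigzag mapping documented in the diff.go hunk above is small enough to sanity-check by hand. Below is a standalone copy of the same helper (illustrative only, outside the vendored tree) together with the output its doc comment promises:

```go
package main

import "fmt"

// zigzag mirrors cmp/internal/diff.zigzag: it maps 0, 1, 2, 3, 4, ...
// onto 0, -1, +1, -2, +2, ... so that a single counter can probe
// diagonals on alternating sides of the current frontier point.
func zigzag(x int) int {
	if x&1 != 0 {
		x = ^x // bitwise complement: 1 -> -2, 3 -> -4, ...
	}
	return x >> 1 // arithmetic shift halves while preserving sign
}

func main() {
	for i := 0; i < 7; i++ {
		fmt.Print(zigzag(i), " ")
	}
	fmt.Println() // prints: 0 -1 1 -2 2 -3 3
}
```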
func TypeString(t reflect.Type, qualified bool) string { @@ -20,6 +22,11 @@ func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte // of the same name and within the same package, // but declared within the namespace of different functions. + // Use the "any" alias instead of "interface{}" for better readability. + if t == anyType { + return append(b, "any"...) + } + // Named type. if t.Name() != "" { if qualified && t.PkgPath() != "" { diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go index 44f4a5afd..1a71bfcbd 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego // +build purego package value diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go index a605953d4..16e6860af 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego // +build !purego package value diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go deleted file mode 100644 index 9147a2997..000000000 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package value - -import ( - "math" - "reflect" -) - -// IsZero reports whether v is the zero value. -// This does not rely on Interface and so can be used on unexported fields. 
-func IsZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return v.Bool() == false - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return math.Float64bits(v.Float()) == 0 - case reflect.Complex64, reflect.Complex128: - return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0 - case reflect.String: - return v.String() == "" - case reflect.UnsafePointer: - return v.Pointer() == 0 - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - return v.IsNil() - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if !IsZero(v.Index(i)) { - return false - } - } - return true - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if !IsZero(v.Field(i)) { - return false - } - } - return true - } - return false -} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go index e57b9eb53..1f9ca9c48 100644 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -33,6 +33,7 @@ type Option interface { } // applicableOption represents the following types: +// // Fundamental: ignore | validator | *comparer | *transformer // Grouping: Options type applicableOption interface { @@ -43,6 +44,7 @@ type applicableOption interface { } // coreOption represents the following types: +// // Fundamental: ignore | validator | *comparer | *transformer // Filters: *pathFilter | *valuesFilter type coreOption interface { @@ -336,9 +338,9 @@ func (tr transformer) String() string { // both implement T. // // The equality function must be: -// • Symmetric: equal(x, y) == equal(y, x) -// • Deterministic: equal(x, y) == equal(x, y) -// • Pure: equal(x, y) does not modify x or y +// - Symmetric: equal(x, y) == equal(y, x) +// - Deterministic: equal(x, y) == equal(x, y) +// - Pure: equal(x, y) does not modify x or y func Comparer(f interface{}) Option { v := reflect.ValueOf(f) if !function.IsType(v.Type(), function.Equal) || v.IsNil() { @@ -430,7 +432,7 @@ func AllowUnexported(types ...interface{}) Option { } // Result represents the comparison result for a single node and -// is provided by cmp when calling Result (see Reporter). +// is provided by cmp when calling Report (see Reporter). type Result struct { _ [0]func() // Make Result incomparable flags resultFlags diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index f01eff318..a0a588502 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -41,13 +41,13 @@ type PathStep interface { // The type of each valid value is guaranteed to be identical to Type. // // In some cases, one or both may be invalid or have restrictions: - // • For StructField, both are not interface-able if the current field - // is unexported and the struct type is not explicitly permitted by - // an Exporter to traverse unexported fields. - // • For SliceIndex, one may be invalid if an element is missing from - // either the x or y slice. - // • For MapIndex, one may be invalid if an entry is missing from - // either the x or y map. 
+ // - For StructField, both are not interface-able if the current field + // is unexported and the struct type is not explicitly permitted by + // an Exporter to traverse unexported fields. + // - For SliceIndex, one may be invalid if an element is missing from + // either the x or y slice. + // - For MapIndex, one may be invalid if an entry is missing from + // either the x or y map. // // The provided values must not be mutated. Values() (vx, vy reflect.Value) @@ -94,6 +94,7 @@ func (pa Path) Index(i int) PathStep { // The simplified path only contains struct field accesses. // // For example: +// // MyMap.MySlices.MyField func (pa Path) String() string { var ss []string @@ -108,6 +109,7 @@ func (pa Path) String() string { // GoString returns the path to a specific node using Go syntax. // // For example: +// // (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField func (pa Path) GoString() string { var ssPre, ssPost []string @@ -159,7 +161,7 @@ func (ps pathStep) String() string { if ps.typ == nil { return "" } - s := ps.typ.String() + s := value.TypeString(ps.typ, false) if s == "" || strings.ContainsAny(s, "{}\n") { return "root" // Type too simple or complex to print } @@ -178,7 +180,7 @@ type structField struct { unexported bool mayForce bool // Forcibly allow visibility paddr bool // Was parent addressable? - pvx, pvy reflect.Value // Parent values (always addressible) + pvx, pvy reflect.Value // Parent values (always addressable) field reflect.StructField // Field information } @@ -282,7 +284,7 @@ type typeAssertion struct { func (ta TypeAssertion) Type() reflect.Type { return ta.typ } func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } -func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } +func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) } // Transform is a transformation from the parent type to the current type. type Transform struct{ *transform } diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go index 104bb3053..2050bf6b4 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_compare.go +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -7,8 +7,6 @@ package cmp import ( "fmt" "reflect" - - "github.com/google/go-cmp/cmp/internal/value" ) // numContextRecords is the number of surrounding equal records to print. @@ -116,7 +114,10 @@ func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out } // For leaf nodes, format the value based on the reflect.Values alone. - if v.MaxDepth == 0 { + // As a special case, treat equal []byte as a leaf nodes. + isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType + isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0 + if v.MaxDepth == 0 || isEqualBytes { switch opts.DiffMode { case diffUnknown, diffIdentical: // Format Equal. 
@@ -245,11 +246,11 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, pt var isZero bool switch opts.DiffMode { case diffIdentical: - isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY) + isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero() case diffRemoved: - isZero = value.IsZero(r.Value.ValueX) + isZero = r.Value.ValueX.IsZero() case diffInserted: - isZero = value.IsZero(r.Value.ValueY) + isZero = r.Value.ValueY.IsZero() } if isZero { continue diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go index 33f03577f..2ab41fad3 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -16,6 +16,13 @@ import ( "github.com/google/go-cmp/cmp/internal/value" ) +var ( + anyType = reflect.TypeOf((*interface{})(nil)).Elem() + stringType = reflect.TypeOf((*string)(nil)).Elem() + bytesType = reflect.TypeOf((*[]byte)(nil)).Elem() + byteType = reflect.TypeOf((*byte)(nil)).Elem() +) + type formatValueOptions struct { // AvoidStringer controls whether to avoid calling custom stringer // methods like error.Error or fmt.Stringer.String. @@ -184,7 +191,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } for i := 0; i < v.NumField(); i++ { vv := v.Field(i) - if value.IsZero(vv) { + if vv.IsZero() { continue // Elide fields with zero values } if len(list) == maxLen { @@ -205,12 +212,13 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } // Check whether this is a []byte of text data. - if t.Elem() == reflect.TypeOf(byte(0)) { + if t.Elem() == byteType { b := v.Bytes() - isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) && unicode.IsSpace(r) } + isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) } if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { out = opts.formatString("", string(b)) - return opts.WithTypeMode(emitType).FormatType(t, out) + skipType = true + return opts.FormatType(t, out) } } @@ -281,7 +289,12 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } defer ptrs.Pop() - skipType = true // Let the underlying value print the type instead + // Skip the name only if this is an unnamed pointer type. + // Otherwise taking the address of a value does not reproduce + // the named pointer type. + if v.Type().Name() == "" { + skipType = true // Let the underlying value print the type instead + } out = opts.FormatValue(v.Elem(), t.Kind(), ptrs) out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) out = &textWrap{Prefix: "&", Value: out} @@ -292,7 +305,6 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, } // Interfaces accept different concrete types, // so configure the underlying value to explicitly print the type. 
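The deleted internal/value/zero.go and the switches to `r.Value.ValueX.IsZero()` and `vv.IsZero()` above both lean on `reflect.Value.IsZero` from the standard library (available since Go 1.13). A small sketch of its semantics, illustrative only:

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	fmt.Println(reflect.ValueOf(0).IsZero())          // true
	fmt.Println(reflect.ValueOf("").IsZero())         // true
	fmt.Println(reflect.ValueOf([]int(nil)).IsZero()) // true: nil slice
	fmt.Println(reflect.ValueOf([]int{}).IsZero())    // false: non-nil empty slice
	// Structs are zero only if every field is zero, matching the
	// recursive behavior of the removed vendored helper.
	fmt.Println(reflect.ValueOf(struct{ A int }{}).IsZero()) // true
}
```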
- skipType = true // Print the concrete type instead return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs) default: panic(fmt.Sprintf("%v kind not handled", v.Kind())) diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index 2ad3bc85b..23e444f62 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -80,7 +80,7 @@ func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { } // Use specialized string diffing for longer slices or strings. - const minLength = 64 + const minLength = 32 return vx.Len() >= minLength && vy.Len() >= minLength } @@ -104,7 +104,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { case t.Kind() == reflect.String: sx, sy = vx.String(), vy.String() isString = true - case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)): + case t.Kind() == reflect.Slice && t.Elem() == byteType: sx, sy = string(vx.Bytes()), string(vy.Bytes()) isString = true case t.Kind() == reflect.Array: @@ -147,7 +147,10 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { }) efficiencyLines := float64(esLines.Dist()) / float64(len(esLines)) efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes)) - isPureLinedText = efficiencyLines < 4*efficiencyBytes + quotedLength := len(strconv.Quote(sx + sy)) + unquotedLength := len(sx) + len(sy) + escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength) + isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1 } } @@ -171,12 +174,13 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { // differences in a string literal. This format is more readable, // but has edge-cases where differences are visually indistinguishable. // This format is avoided under the following conditions: - // • A line starts with `"""` - // • A line starts with "..." - // • A line contains non-printable characters - // • Adjacent different lines differ only by whitespace + // - A line starts with `"""` + // - A line starts with "..." + // - A line contains non-printable characters + // - Adjacent different lines differ only by whitespace // // For example: + // // """ // ... 
// 3 identical lines // foo @@ -231,7 +235,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"} switch t.Kind() { case reflect.String: - if t != reflect.TypeOf(string("")) { + if t != stringType { out = opts.FormatType(t, out) } case reflect.Slice: @@ -326,12 +330,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { switch t.Kind() { case reflect.String: out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} - if t != reflect.TypeOf(string("")) { + if t != stringType { out = opts.FormatType(t, out) } case reflect.Slice: out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} - if t != reflect.TypeOf([]byte(nil)) { + if t != bytesType { out = opts.FormatType(t, out) } } @@ -446,7 +450,6 @@ func (opts formatOptions) formatDiffSlice( // {NumIdentical: 3}, // {NumInserted: 1}, // ] -// func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { var prevMode byte lastStats := func(mode byte) *diffStats { @@ -503,7 +506,6 @@ func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) // {NumIdentical: 8, NumRemoved: 12, NumInserted: 3}, // {NumIdentical: 63}, // ] -// func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { groups, groupsOrig := groups[:0], groups for i, ds := range groupsOrig { @@ -548,7 +550,6 @@ func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStat // {NumRemoved: 9}, // {NumIdentical: 64}, // incremented by 10 // ] -// func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats { var ix, iy int // indexes into sequence x and y for i, ds := range groups { @@ -563,10 +564,10 @@ func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []d nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified ny := ds.NumIdentical + ds.NumInserted + ds.NumModified var numLeadingIdentical, numTrailingIdentical int - for i := 0; i < nx && i < ny && eq(ix+i, iy+i); i++ { + for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ { numLeadingIdentical++ } - for i := 0; i < nx && i < ny && eq(ix+nx-1-i, iy+ny-1-i); i++ { + for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ { numTrailingIdentical++ } if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 { diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go index 0fd46d7ff..388fcf571 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_text.go +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -393,6 +393,7 @@ func (s diffStats) Append(ds diffStats) diffStats { // String prints a humanly-readable summary of coalesced records. // // Example: +// // diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" func (s diffStats) String() string { var ss []string diff --git a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml index f8684d99f..061d72ae0 100644 --- a/vendor/github.com/google/gofuzz/.travis.yml +++ b/vendor/github.com/google/gofuzz/.travis.yml @@ -1,13 +1,10 @@ language: go go: - - 1.4 - - 1.3 - - 1.2 - - tip - -install: - - if ! 
go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi + - 1.11.x + - 1.12.x + - 1.13.x + - master script: - go test -cover diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md index 51cf5cd1a..97c1b34fd 100644 --- a/vendor/github.com/google/gofuzz/CONTRIBUTING.md +++ b/vendor/github.com/google/gofuzz/CONTRIBUTING.md @@ -1,7 +1,7 @@ # How to contribute # We'd love to accept your patches and contributions to this project. There are -a just a few small guidelines you need to follow. +just a few small guidelines you need to follow. ## Contributor License Agreement ## diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md index 386c2a457..b503aae7d 100644 --- a/vendor/github.com/google/gofuzz/README.md +++ b/vendor/github.com/google/gofuzz/README.md @@ -68,4 +68,22 @@ f.Fuzz(&myObject) // Type will correspond to whether A or B info is set. See more examples in ```example_test.go```. +You can use this library for easier [go-fuzz](https://github.com/dvyukov/go-fuzz)ing. +go-fuzz provides the user a byte-slice, which should be converted to different inputs +for the tested function. This library can help convert the byte slice. Consider for +example a fuzz test for a the function `mypackage.MyFunc` that takes an int arguments: +```go +// +build gofuzz +package mypackage + +import fuzz "github.com/google/gofuzz" + +func Fuzz(data []byte) int { + var i int + fuzz.NewFromGoFuzz(data).Fuzz(&i) + MyFunc(i) + return 0 +} +``` + Happy testing! diff --git a/vendor/github.com/google/gofuzz/bytesource/bytesource.go b/vendor/github.com/google/gofuzz/bytesource/bytesource.go new file mode 100644 index 000000000..5bb365949 --- /dev/null +++ b/vendor/github.com/google/gofuzz/bytesource/bytesource.go @@ -0,0 +1,81 @@ +/* +Copyright 2014 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package bytesource provides a rand.Source64 that is determined by a slice of bytes. +package bytesource + +import ( + "bytes" + "encoding/binary" + "io" + "math/rand" +) + +// ByteSource implements rand.Source64 determined by a slice of bytes. The random numbers are +// generated from each 8 bytes in the slice, until the last bytes are consumed, from which a +// fallback pseudo random source is created in case more random numbers are required. +// It also exposes a `bytes.Reader` API, which lets callers consume the bytes directly. +type ByteSource struct { + *bytes.Reader + fallback rand.Source +} + +// New returns a new ByteSource from a given slice of bytes. +func New(input []byte) *ByteSource { + s := &ByteSource{ + Reader: bytes.NewReader(input), + fallback: rand.NewSource(0), + } + if len(input) > 0 { + s.fallback = rand.NewSource(int64(s.consumeUint64())) + } + return s +} + +func (s *ByteSource) Uint64() uint64 { + // Return from input if it was not exhausted. 
+ if s.Len() > 0 { + return s.consumeUint64() + } + + // Input was exhausted, return random number from fallback (in this case fallback should not be + // nil). Try first having a Uint64 output (Should work in current rand implementation), + // otherwise return a conversion of Int63. + if s64, ok := s.fallback.(rand.Source64); ok { + return s64.Uint64() + } + return uint64(s.fallback.Int63()) +} + +func (s *ByteSource) Int63() int64 { + return int64(s.Uint64() >> 1) +} + +func (s *ByteSource) Seed(seed int64) { + s.fallback = rand.NewSource(seed) + s.Reader = bytes.NewReader(nil) +} + +// consumeUint64 reads 8 bytes from the input and convert them to a uint64. It assumes that the the +// bytes reader is not empty. +func (s *ByteSource) consumeUint64() uint64 { + var bytes [8]byte + _, err := s.Read(bytes[:]) + if err != nil && err != io.EOF { + panic("failed reading source") // Should not happen. + } + return binary.BigEndian.Uint64(bytes[:]) +} diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go index da0a5f938..761520a8c 100644 --- a/vendor/github.com/google/gofuzz/fuzz.go +++ b/vendor/github.com/google/gofuzz/fuzz.go @@ -22,6 +22,9 @@ import ( "reflect" "regexp" "time" + + "github.com/google/gofuzz/bytesource" + "strings" ) // fuzzFuncMap is a map from a type to a fuzzFunc that handles that type. @@ -61,6 +64,34 @@ func NewWithSeed(seed int64) *Fuzzer { return f } +// NewFromGoFuzz is a helper function that enables using gofuzz (this +// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous +// fuzzing. Essentially, it enables translating the fuzzing bytes from +// go-fuzz to any Go object using this library. +// +// This implementation promises a constant translation from a given slice of +// bytes to the fuzzed objects. This promise will remain over future +// versions of Go and of this library. +// +// Note: the returned Fuzzer should not be shared between multiple goroutines, +// as its deterministic output will no longer be available. +// +// Example: use go-fuzz to test the function `MyFunc(int)` in the package +// `mypackage`. Add the file: "mypacakge_fuzz.go" with the content: +// +// // +build gofuzz +// package mypacakge +// import fuzz "github.com/google/gofuzz" +// func Fuzz(data []byte) int { +// var i int +// fuzz.NewFromGoFuzz(data).Fuzz(&i) +// MyFunc(i) +// return 0 +// } +func NewFromGoFuzz(data []byte) *Fuzzer { + return New().RandSource(bytesource.New(data)) +} + // Funcs adds each entry in fuzzFuncs as a custom fuzzing function. // // Each entry in fuzzFuncs must be a function taking two parameters. 
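Because the new bytesource package above implements both `rand.Source` and `rand.Source64`, it can be plugged straight into math/rand. A minimal sketch (illustrative only, using the `github.com/google/gofuzz/bytesource` import path shown in the vendored fuzz.go):

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/google/gofuzz/bytesource"
)

func main() {
	// New consumes the first 8 bytes to seed the fallback PRNG, so with
	// 16 input bytes the first Uint64 below still comes from the input;
	// once the reader is exhausted, the deterministic fallback takes over.
	src := bytesource.New([]byte{
		1, 2, 3, 4, 5, 6, 7, 8,
		9, 10, 11, 12, 13, 14, 15, 16,
	})
	r := rand.New(src)
	fmt.Println(r.Uint64(), r.Uint64(), r.Uint64())
}
```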
@@ -141,7 +172,7 @@ func (f *Fuzzer) genElementCount() int { } func (f *Fuzzer) genShouldFill() bool { - return f.r.Float64() > f.nilChance + return f.r.Float64() >= f.nilChance } // MaxDepth sets the maximum number of recursive fuzz calls that will be made @@ -240,6 +271,7 @@ func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) { fn(v, fc.fuzzer.r) return } + switch v.Kind() { case reflect.Map: if fc.fuzzer.genShouldFill() { @@ -450,10 +482,10 @@ var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ v.SetFloat(r.Float64()) }, reflect.Complex64: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") + v.SetComplex(complex128(complex(r.Float32(), r.Float32()))) }, reflect.Complex128: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") + v.SetComplex(complex(r.Float64(), r.Float64())) }, reflect.String: func(v reflect.Value, r *rand.Rand) { v.SetString(randString(r)) @@ -465,38 +497,105 @@ var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ // randBool returns true or false randomly. func randBool(r *rand.Rand) bool { - if r.Int()&1 == 1 { - return true - } - return false + return r.Int31()&(1<<30) == 0 +} + +type int63nPicker interface { + Int63n(int64) int64 } -type charRange struct { - first, last rune +// UnicodeRange describes a sequential range of unicode characters. +// Last must be numerically greater than First. +type UnicodeRange struct { + First, Last rune } +// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters. +// To be useful, each range must have at least one character (First <= Last) and +// there must be at least one range. +type UnicodeRanges []UnicodeRange + // choose returns a random unicode character from the given range, using the // given randomness source. -func (r *charRange) choose(rand *rand.Rand) rune { - count := int64(r.last - r.first) - return r.first + rune(rand.Int63n(count)) +func (ur UnicodeRange) choose(r int63nPicker) rune { + count := int64(ur.Last - ur.First + 1) + return ur.First + rune(r.Int63n(count)) +} + +// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings. +// Each character is selected from the range ur. If there are no characters +// in the range (cr.Last < cr.First), this will panic. +func (ur UnicodeRange) CustomStringFuzzFunc() func(s *string, c Continue) { + ur.check() + return func(s *string, c Continue) { + *s = ur.randString(c.Rand) + } } -var unicodeRanges = []charRange{ +// check is a function that used to check whether the first of ur(UnicodeRange) +// is greater than the last one. +func (ur UnicodeRange) check() { + if ur.Last < ur.First { + panic("The last encoding must be greater than the first one.") + } +} + +// randString of UnicodeRange makes a random string up to 20 characters long. +// Each character is selected form ur(UnicodeRange). +func (ur UnicodeRange) randString(r *rand.Rand) string { + n := r.Intn(20) + sb := strings.Builder{} + sb.Grow(n) + for i := 0; i < n; i++ { + sb.WriteRune(ur.choose(r)) + } + return sb.String() +} + +// defaultUnicodeRanges sets a default unicode range when user do not set +// CustomStringFuzzFunc() but wants fuzz string. +var defaultUnicodeRanges = UnicodeRanges{ {' ', '~'}, // ASCII characters {'\u00a0', '\u02af'}, // Multi-byte encoded characters {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) } +// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings. +// Each character is selected from one of the ranges of ur(UnicodeRanges). 
+// Each range has an equal probability of being chosen. If there are no ranges, +// or a selected range has no characters (.Last < .First), this will panic. +// Do not modify any of the ranges in ur after calling this function. +func (ur UnicodeRanges) CustomStringFuzzFunc() func(s *string, c Continue) { + // Check unicode ranges slice is empty. + if len(ur) == 0 { + panic("UnicodeRanges is empty.") + } + // if not empty, each range should be checked. + for i := range ur { + ur[i].check() + } + return func(s *string, c Continue) { + *s = ur.randString(c.Rand) + } +} + +// randString of UnicodeRanges makes a random string up to 20 characters long. +// Each character is selected form one of the ranges of ur(UnicodeRanges), +// and each range has an equal probability of being chosen. +func (ur UnicodeRanges) randString(r *rand.Rand) string { + n := r.Intn(20) + sb := strings.Builder{} + sb.Grow(n) + for i := 0; i < n; i++ { + sb.WriteRune(ur[r.Intn(len(ur))].choose(r)) + } + return sb.String() +} + // randString makes a random string up to 20 characters long. The returned string // may include a variety of (valid) UTF-8 encodings. func randString(r *rand.Rand) string { - n := r.Intn(20) - runes := make([]rune, n) - for i := range runes { - runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r) - } - return string(runes) + return defaultUnicodeRanges.randString(r) } // randUint64 makes random 64 bit numbers. diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index 3b306ab3f..05a41bd6a 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,168 @@ +## 2.4.0 + +### Features + +- DeferCleanup supports functions with multiple-return values [5e33c75] +- Add GinkgoLogr (#1067) [bf78c28] +- Introduction of 'MustPassRepeatedly' decorator (#1051) [047c02f] + +### Fixes +- correcting some typos (#1064) [1403d3c] +- fix flaky internal_integration interupt specs [2105ba3] +- Correct busted link in README [be6b5b9] + +### Maintenance +- Bump actions/checkout from 2 to 3 (#1062) [8a2f483] +- Bump golang.org/x/tools from 0.1.12 to 0.2.0 (#1065) [529c4e8] +- Bump github/codeql-action from 1 to 2 (#1061) [da09146] +- Bump actions/setup-go from 2 to 3 (#1060) [918040d] +- Bump github.com/onsi/gomega from 1.22.0 to 1.22.1 (#1053) [2098e4d] +- Bump nokogiri from 1.13.8 to 1.13.9 in /docs (#1066) [1d74122] +- Add GHA to dependabot config [4442772] + +## 2.3.1 + +## Fixes +Several users were invoking `ginkgo` by installing the latest version of the cli via `go install github.com/onsi/ginkgo/v2/ginkgo@latest`. When 2.3.0 was released this resulted in an influx of issues as CI systems failed due to a change in the internal contract between the Ginkgo CLI and the Ginkgo library. Ginkgo only supports running the same version of the library as the cli (which is why both are packaged in the same repository). + +With this patch release, the ginkgo CLI can now identify a version mismatch and emit a helpful error message. + +- Ginkgo cli can identify version mismatches and emit a helpful error message [bc4ae2f] +- further emphasize that a version match is required when running Ginkgo on CI and/or locally [2691dd8] + +### Maintenance +- bump gomega to v1.22.0 [822a937] + +## 2.3.0 + +### Interruptible Nodes and Timeouts + +Ginkgo now supports per-node and per-spec timeouts on interruptible nodes. 
Check out the [documentation for all the details](https://onsi.github.io/ginkgo/#spec-timeouts-and-interruptible-nodes) but the gist is you can now write specs like this: + +```go +It("is interruptible", func(ctx SpecContext) { // or context.Context instead of SpecContext, both are valid. + // do things until `ctx.Done()` is closed, for example: + req, err := http.NewRequestWithContext(ctx, "POST", "/build-widgets", nil) + Expect(err).NotTo(HaveOccurred()) + _, err = http.DefaultClient.Do(req) + Expect(err).NotTo(HaveOccurred()) + + Eventually(client.WidgetCount).WithContext(ctx).Should(Equal(17)) +}, NodeTimeout(time.Second*20), GracePeriod(5*time.Second)) +``` + +and have Ginkgo ensure that the node completes before the timeout elapses. If it does elapse, or if an external interrupt is received (e.g. `^C`), then Ginkgo will cancel the context and wait for the Grace Period for the node to exit before proceeding with any cleanup nodes associated with the spec. The `ctx` provided by Ginkgo can also be passed down to Gomega's `Eventually` to have all assertions within the node governed by a single deadline. + +### Features + +- Ginkgo now records any additional failures that occur during the cleanup of a failed spec. In prior versions this information was quietly discarded, but the introduction of a more rigorous approach to timeouts and interruptions allows Ginkgo to better track subsequent failures. +- `SpecContext` also provides a mechanism for third-party libraries to provide additional information when a Progress Report is generated. Gomega uses this to provide the current state of an `Eventually().WithContext()` assertion when a Progress Report is requested. +- DescribeTable now exits with an error if it is not passed any Entries [a4c9865] + +## Fixes +- fixes crashes on newer Ruby 3 installations by upgrading github-pages gem dependency [92c88d5] +- Make the outline command able to use the DSL import [1be2427] + +## Maintenance +- chore(docs): delete no meaning d [57c373c] +- chore(docs): Fix hyperlinks [30526d5] +- chore(docs): fix code blocks without language settings [cf611c4] +- fix intra-doc link [b541bcb] + +## 2.2.0 + +### Generate real-time Progress Reports [f91377c] + +Ginkgo can now generate Progress Reports to point users at the current running line of code (including a preview of the actual source code) and a best guess at the most relevant subroutines. + +These Progress Reports allow users to debug stuck or slow tests without exiting the Ginkgo process. A Progress Report can be generated at any time by sending Ginkgo a `SIGINFO` (`^T` on MacOS/BSD) or `SIGUSR1`. + +In addition, the user can specify `--poll-progress-after` and `--poll-progress-interval` to have Ginkgo start periodically emitting progress reports if a given node takes too long. These can be overridden/set on a per-node basis with the `PollProgressAfter` and `PollProgressInterval` decorators. + +Progress Reports are emitted to stdout, and also stored in the machine-readable report formats that Ginkgo supports. + +Ginkgo also uses this progress reporting infrastructure under the hood when handling timeouts and interrupts. This yields much more focused, useful, and informative stack traces than previously. + +### Features +- `BeforeSuite`, `AfterSuite`, `SynchronizedBeforeSuite`, `SynchronizedAfterSuite`, and `ReportAfterSuite` now support (the relevant subset of) decorators. These can be passed in _after_ the callback functions that are usually passed into these nodes.
+ + As a result the **signature of these methods has changed** and now includes a trailing `args ...interface{}`. For most users simply using the DSL, this change is transparent. However if you were assigning one of these functions to a custom variable (or passing it around) then your code may need to change to reflect the new signature. + +### Maintenance +- Modernize the invocation of Ginkgo in github actions [0ffde58] +- Update reocmmended CI settings in docs [896bbb9] +- Speed up unnecessarily slow integration test [6d3a90e] + +## 2.1.6 + +### Fixes +- Add `SuppressProgressReporting` decorator to turn off --progress announcements for a given node [dfef62a] +- chore: remove duplicate word in comments [7373214] + +## 2.1.5 + +### Fixes +- drop -mod=mod instructions; fixes #1026 [6ad7138] +- Ensure `CurrentSpecReport` and `AddReportEntry` are thread-safe [817c09b] +- remove stale importmap gcflags flag test [3cd8b93] +- Always emit spec summary [5cf23e2] - even when only one spec has failed +- Fix ReportAfterSuite usage in docs [b1864ad] +- fixed typo (#997) [219cc00] +- TrimRight is not designed to trim Suffix [71ebb74] +- refactor: replace strings.Replace with strings.ReplaceAll (#978) [143d208] +- fix syntax in examples (#975) [b69554f] + +### Maintenance +- Bump github.com/onsi/gomega from 1.20.0 to 1.20.1 (#1027) [e5dfce4] +- Bump tzinfo from 1.2.9 to 1.2.10 in /docs (#1006) [7ae91c4] +- Bump github.com/onsi/gomega from 1.19.0 to 1.20.0 (#1005) [e87a85a] +- test: add new Go 1.19 to test matrix (#1014) [bbefe12] +- Bump golang.org/x/tools from 0.1.11 to 0.1.12 (#1012) [9327906] +- Bump golang.org/x/tools from 0.1.10 to 0.1.11 (#993) [f44af96] +- Bump nokogiri from 1.13.3 to 1.13.6 in /docs (#981) [ef336aa] + +## 2.1.4 + +### Fixes +- Numerous documentation typos +- Prepend `when` when using `When` (this behavior was in 1.x but unintentionally lost during the 2.0 rewrite) [efce903] +- improve error message when a parallel process fails to report back [a7bd1fe] +- guard against concurrent map writes in DeprecationTracker [0976569] +- Invoke reporting nodes during dry-run (fixes #956 and #935) [aae4480] +- Fix ginkgo import circle [f779385] + +## 2.1.3 + +See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2. + +### Fixes +- Calling By in a container node now emits a useful error. [ff12cee] + +## 2.1.2 + +### Fixes + +- Track location of focused specs correctly in `ginkgo unfocus` [a612ff1] +- Profiling suites with focused specs no longer generates an erroneous failure message [8fbfa02] +- Several documentation typos fixed. Big thanks to everyone who helped catch them and report/fix them! + +## 2.1.1 + +See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2. + +### Fixes +- Suites that only import the new dsl packages are now correctly identified as Ginkgo suites [ec17e17] + +## 2.1.0 + +See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) for details on V2. + +2.1.0 is a minor release with a few tweaks: + +- Introduce new DSL packages to enable users to pick-and-choose which portions of the DSL to dot-import. [90868e2] More details [here](https://onsi.github.io/ginkgo/#alternatives-to-dot-importing-ginkgo). +- Add error check for invalid/nil parameters to DescribeTable [6f8577e] +- Myriad docs typos fixed (thanks everyone!) 
[718542a, ecb7098, 146654c, a8f9913, 6bdffde, 03dcd7e] + ## 2.0.0 See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkgo/MIGRATING_TO_V2) @@ -7,7 +172,7 @@ See [https://onsi.github.io/ginkgo/MIGRATING_TO_V2](https://onsi.github.io/ginkg Ginkgo 2.0 now has a Release Candidate. 1.16.5 advertises the existence of the RC. 1.16.5 deprecates GinkgoParallelNode in favor of GinkgoParallelProcess -You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environment variable or creating a file in your home directory called `.ack-ginkgo-rc` +You can silence the RC advertisement by setting an `ACK_GINKGO_RC=true` environment variable or creating a file in your home directory called `.ack-ginkgo-rc` ## 1.16.4 @@ -27,7 +192,7 @@ You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environme ## 1.16.1 ### Fixes -- Supress --stream deprecation warning on windows (#793) +- Suppress --stream deprecation warning on windows (#793) ## 1.16.0 @@ -114,7 +279,7 @@ You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environme - replace tail package with maintained one. this fixes go get errors (#667) [4ba33d4] - improve ginkgo performance - makes progress on #644 [a14f98e] - fix convert integration tests [1f8ba69] -- fix typo succesful -> successful (#663) [1ea49cf] +- fix typo successful -> successful (#663) [1ea49cf] - Fix invalid link (#658) [b886136] - convert utility : Include comments from source (#657) [1077c6d] - Explain what BDD means [d79e7fb] @@ -205,10 +370,10 @@ You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environme - fix: for `go vet` to pass [69338ec] - docs: fix for contributing instructions [7004cb1] - consolidate and streamline contribution docs (#494) [d848015] -- Make generated Junit file compatable with "Maven Surefire" (#488) [e51bee6] +- Make generated Junit file compatible with "Maven Surefire" (#488) [e51bee6] - all: gofmt [000d317] - Increase eventually timeout to 30s [c73579c] -- Clarify asynchronous test behaviour [294d8f4] +- Clarify asynchronous test behavior [294d8f4] - Travis badge should only show master [26d2143] ## 1.5.0 5/10/2018 @@ -226,13 +391,13 @@ You can silence the RC advertisement by setting an `ACK_GINKG_RC=true` environme - When running a test and calculating the coverage using the `-coverprofile` and `-outputdir` flags, Ginkgo fails with an error if the directory does not exist. 
This is due to an [issue in go 1.10](https://github.com/golang/go/issues/24588) (#446) [b36a6e0] - `unfocus` command ignores vendor folder (#459) [e5e551c, c556e43, a3b6351, 9a820dd] - Ignore packages whose tests are all ignored by go (#456) [7430ca7, 6d8be98] -- Increase the threshold when checking time measuments (#455) [2f714bf, 68f622c] +- Increase the threshold when checking time measurements (#455) [2f714bf, 68f622c] - Fix race condition in coverage tests (#423) [a5a8ff7, ab9c08b] - Add an extra new line after reporting spec run completion for test2json [874520d] - added name name field to junit reported testsuite [ae61c63] - Do not set the run time of a spec when the dryRun flag is used (#438) [457e2d9, ba8e856] - Process FWhen and FSpecify when unfocusing (#434) [9008c7b, ee65bd, df87dfe] -- Synchronise the access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d] +- Synchronies the access to the state of specs to avoid race conditions (#430) [7d481bc, ae6829d] - Added Duration on GinkgoTestDescription (#383) [5f49dad, 528417e, 0747408, 329d7ed] - Fix Ginkgo stack trace on failure for Specify (#415) [b977ede, 65ca40e, 6c46eb8] - Update README with Go 1.6+, Golang -> Go (#409) [17f6b97, bc14b66, 20d1598] @@ -317,7 +482,7 @@ Bug Fixes: - Fix incorrect failure message when a panic occurs during a parallel test run - Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests. - Be more consistent about handling SIGTERM as well as SIGINT -- When interupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts. +- When interrupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts. - Fixed a long standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!) ## 1.1.0 (8/2/2014) diff --git a/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md index 15079406e..1da92fe7e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md +++ b/vendor/github.com/onsi/ginkgo/v2/CONTRIBUTING.md @@ -8,6 +8,6 @@ Your contributions to Ginkgo are essential for its long-term maintenance and imp - When adding to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test. - Make sure all the tests succeed via `ginkgo -r -p` - Vet your changes via `go vet ./...` -- Update the documentation. Ginko uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle exec jekyll serve` in the `docs` directory to preview your changes. +- Update the documentation. Ginkgo uses `godoc` comments and documentation in `docs/index.md`. You can run `bundle exec jekyll serve` in the `docs` directory to preview your changes. Thanks for supporting Ginkgo! \ No newline at end of file diff --git a/vendor/github.com/onsi/ginkgo/v2/README.md b/vendor/github.com/onsi/ginkgo/v2/README.md index b24250611..eda4116a9 100644 --- a/vendor/github.com/onsi/ginkgo/v2/README.md +++ b/vendor/github.com/onsi/ginkgo/v2/README.md @@ -90,7 +90,7 @@ If you have a question, comment, bug report, feature request, etc. 
please open a ## Capabilities -Whether writing basic unit specs, complex integration specs, or even performance specs - Ginkgo gives you an expressive Domain-Specific Language (DSL) that will be familiar to users coming from frameworks such as [Quick](https://github.com/Quick/Quick), [RSpec](https://rspec.info), [Jasmine](https://jasmine.github.io), and [Busted](https://olivinelabs.com/busted/). This style of testing is sometimes referred to as "Behavior-Driven Development" (BDD) though Ginkgo's utility extends beyond acceptance-level testing. +Whether writing basic unit specs, complex integration specs, or even performance specs - Ginkgo gives you an expressive Domain-Specific Language (DSL) that will be familiar to users coming from frameworks such as [Quick](https://github.com/Quick/Quick), [RSpec](https://rspec.info), [Jasmine](https://jasmine.github.io), and [Busted](https://lunarmodules.github.io/busted/). This style of testing is sometimes referred to as "Behavior-Driven Development" (BDD) though Ginkgo's utility extends beyond acceptance-level testing. With Ginkgo's DSL you can use nestable [`Describe`, `Context` and `When` container nodes](https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes) to help you organize your specs. [`BeforeEach` and `AfterEach` setup nodes](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and cleanup. [`It` and `Specify` subject nodes](https://onsi.github.io/ginkgo/#spec-subjects-it) that hold your assertions. [`BeforeSuite` and `AfterSuite` nodes](https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite) to prep for and cleanup after a suite... and [much more!](https://onsi.github.io/ginkgo/#writing-specs) @@ -102,7 +102,7 @@ ginkgo -p By following [established patterns for writing parallel specs](https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs) you can build even large, complex integration suites that parallelize cleanly and run performantly. -As your suites grow Ginkgo helps you keep your specs organized with [labels](https://onsi.github.io/ginkgo/#spec-labels) and lets you easily run [subsets of specs](https://onsi.github.io/ginkgo/#filtering-specs), either [programatically](https://onsi.github.io/ginkgo/#focused-specs) or on the [command line](https://onsi.github.io/ginkgo/#combining-filters). And Ginkgo's reporting infrastructure generates machine-readable output in a [variety of formats](https://onsi.github.io/ginkgo/#generating-machine-readable-reports) _and_ allows you to build your own [custom reporting infrastructure](https://onsi.github.io/ginkgo/#generating-reports-programmatically). +As your suites grow Ginkgo helps you keep your specs organized with [labels](https://onsi.github.io/ginkgo/#spec-labels) and lets you easily run [subsets of specs](https://onsi.github.io/ginkgo/#filtering-specs), either [programmatically](https://onsi.github.io/ginkgo/#focused-specs) or on the [command line](https://onsi.github.io/ginkgo/#combining-filters). And Ginkgo's reporting infrastructure generates machine-readable output in a [variety of formats](https://onsi.github.io/ginkgo/#generating-machine-readable-reports) _and_ allows you to build your own [custom reporting infrastructure](https://onsi.github.io/ginkgo/#generating-reports-programmatically). Ginkgo ships with `ginkgo`, a [command line tool](https://onsi.github.io/ginkgo/#ginkgo-cli-overview) with support for generating, running, filtering, and profiling Ginkgo suites. 
You can even have Ginkgo automatically run your specs when it detects a change with `ginkgo watch`, enabling rapid feedback loops during test-driven development. diff --git a/vendor/github.com/onsi/ginkgo/v2/RELEASING.md b/vendor/github.com/onsi/ginkgo/v2/RELEASING.md index 0c80f668d..363815d7c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/RELEASING.md +++ b/vendor/github.com/onsi/ginkgo/v2/RELEASING.md @@ -1,7 +1,13 @@ A Ginkgo release is a tagged git sha and a GitHub release. To cut a release: 1. Ensure CHANGELOG.md is up to date. - - Use `git log --pretty=format:'- %s [%h]' HEAD...vX.X.X` to list all the commits since the last release + - Use + ```bash + LAST_VERSION=$(git tag --sort=version:refname | tail -n1) + CHANGES=$(git log --pretty=format:'- %s [%h]' HEAD...$LAST_VERSION) + echo -e "## NEXT\n\n$CHANGES\n\n### Features\n\n### Fixes\n\n### Maintenance\n\n$(cat CHANGELOG.md)" > CHANGELOG.md + ``` + to update the changelog - Categorize the changes into - Breaking Changes (requires a major version) - New Features (minor version) diff --git a/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go b/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go index 67b351a08..a61021d08 100644 --- a/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go +++ b/vendor/github.com/onsi/ginkgo/v2/config/deprecated.go @@ -49,21 +49,21 @@ type DeprecatedDefaultReporterConfigType struct { } // Sadly there is no way to gracefully deprecate access to these global config variables. -// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguraiton() method +// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method // These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails type GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{} // Sadly there is no way to gracefully deprecate access to these global config variables. -// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguraiton() method +// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method // These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails var GinkgoConfig = GinkgoConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{} // Sadly there is no way to gracefully deprecate access to these global config variables. -// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguraiton() method +// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method // These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails type DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead struct{} // Sadly there is no way to gracefully deprecate access to these global config variables. 
-// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguraiton() method +// Users who need access to Ginkgo's configuration should use the DSL's GinkgoConfiguration() method // These new unwieldy type names exist to give users a hint when they try to compile and the compilation fails var DefaultReporterConfig = DefaultReporterConfigIsNoLongerAccessibleFromTheConfigPackageUseTheDSLsGinkgoConfigurationFunctionInstead{} diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go index df1bb3fcd..4ea63b84e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go @@ -23,6 +23,7 @@ import ( "strings" "time" + "github.com/go-logr/logr" "github.com/onsi/ginkgo/v2/formatter" "github.com/onsi/ginkgo/v2/internal" "github.com/onsi/ginkgo/v2/internal/global" @@ -46,7 +47,9 @@ func init() { var err error flagSet, err = types.BuildTestSuiteFlagSet(&suiteConfig, &reporterConfig) exitIfErr(err) - GinkgoWriter = internal.NewWriter(os.Stdout) + writer := internal.NewWriter(os.Stdout) + GinkgoWriter = writer + GinkgoLogr = internal.GinkgoLogrFunc(writer) } func exitIfErr(err error) { @@ -77,7 +80,7 @@ func exitIfErrors(errors []error) { } } -//The interface implemented by GinkgoWriter +// The interface implemented by GinkgoWriter type GinkgoWriterInterface interface { io.Writer @@ -89,6 +92,15 @@ type GinkgoWriterInterface interface { ClearTeeWriters() } +/* +SpecContext is the context object passed into nodes that are subject to a timeout or need to be notified of an interrupt. It implements the standard context.Context interface but also contains additional helpers to provide an extensibility point for Ginkgo. (As an example, Gomega's Eventually can use the methods defined on SpecContext to provide deeper integratoin with Ginkgo). + +You can do anything with SpecContext that you do with a typical context.Context including wrapping it with any of the context.With* methods. + +Ginkgo will cancel the SpecContext when a node is interrupted (e.g. by the user sending an interupt signal) or when a node has exceeded it's allowed run-time. Note, however, that even in cases where a node has a deadline, SpecContext will not return a deadline via .Deadline(). This is because Ginkgo does not use a WithDeadline() context to model node deadlines as Ginkgo needs control over the precise timing of the context cancellation to ensure it can provide an accurate progress report at the moment of cancellation. +*/ +type SpecContext = internal.SpecContext + /* GinkgoWriter implements a GinkgoWriterInterface and io.Writer @@ -103,7 +115,12 @@ You can learn more at https://onsi.github.io/ginkgo/#logging-output */ var GinkgoWriter GinkgoWriterInterface -//The interface by which Ginkgo receives *testing.T +/* +GinkgoLogr is a logr.Logger that writes to GinkgoWriter +*/ +var GinkgoLogr logr.Logger + +// The interface by which Ginkgo receives *testing.T type GinkgoTestingT interface { Fail() } @@ -115,7 +132,7 @@ The first return value is the SuiteConfig which controls aspects of how the suit the second return value is the ReporterConfig which controls aspects of how Ginkgo's default reporter emits output. -Mutating the returned configurations has no effect. To reconfigure Ginkgo programatically you need +Mutating the returned configurations has no effect. To reconfigure Ginkgo programmatically you need to pass in your mutated copies into RunSpecs(). 
You can learn more at https://onsi.github.io/ginkgo/#overriding-ginkgos-command-line-configuration-in-the-suite @@ -168,7 +185,7 @@ func PauseOutputInterception() { outputInterceptor.PauseIntercepting() } -//ResumeOutputInterception() - see docs for PauseOutputInterception() +// ResumeOutputInterception() - see docs for PauseOutputInterception() func ResumeOutputInterception() { if outputInterceptor == nil { return @@ -184,7 +201,7 @@ If you bootstrapped your suite with "ginkgo bootstrap" this is already done for you. Ginkgo is typically configured via command-line flags. This configuration -can be overriden, however, and passed into RunSpecs as optional arguments: +can be overridden, however, and passed into RunSpecs as optional arguments: func TestMySuite(t *testing.T) { RegisterFailHandler(gomega.Fail) @@ -221,7 +238,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool { case Labels: suiteLabels = append(suiteLabels, arg...) default: - configErrors = append(configErrors, types.GinkgoErrors.UnkownTypePassedToRunSpecs(arg)) + configErrors = append(configErrors, types.GinkgoErrors.UnknownTypePassedToRunSpecs(arg)) } } exitIfErrors(configErrors) @@ -277,7 +294,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool { suitePath, err = filepath.Abs(suitePath) exitIfErr(err) - passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(suiteConfig.Timeout, client), client, suiteConfig) + passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig) outputInterceptor.Shutdown() flagSet.ValidateDeprecations(deprecationTracker) @@ -421,13 +438,31 @@ var XDescribe = PDescribe var Context, FContext, PContext, XContext = Describe, FDescribe, PDescribe, XDescribe /* When is an alias for Describe - it generates the exact same kind of Container node */ -var When, FWhen, PWhen, XWhen = Describe, FDescribe, PDescribe, XDescribe +func When(text string, args ...interface{}) bool { + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...)) +} + +/* When is an alias for Describe - it generates the exact same kind of Container node */ +func FWhen(text string, args ...interface{}) bool { + args = append(args, internal.Focus) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...)) +} + +/* When is an alias for Describe - it generates the exact same kind of Container node */ +func PWhen(text string, args ...interface{}) bool { + args = append(args, internal.Pending) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeContainer, "when "+text, args...)) +} + +var XWhen = PWhen /* It nodes are Subject nodes that contain your spec code and assertions. Each It node corresponds to an individual Ginkgo spec. You cannot nest any other Ginkgo nodes within an It node's closure. +You can pass It nodes bare functions (func() {}) or functions that receive a SpecContext or context.Context: func(ctx SpecContext) {} and func (ctx context.Context) {}. If the function takes a context then the It is deemed interruptible and Ginkgo will cancel the context in the event of a timeout (configured via the SpecTimeout() or NodeTimeout() decorators) or of an interrupt signal. 
+ You can learn more at https://onsi.github.io/ginkgo/#spec-subjects-it In addition, subject nodes can be decorated with a variety of decorators. You can learn more here: https://onsi.github.io/ginkgo/#decorator-reference */ @@ -478,6 +513,9 @@ Note that By does not generate a new Ginkgo node - rather it is simply synctacti You can learn more about By here: https://onsi.github.io/ginkgo/#documenting-complex-specs-by */ func By(text string, callback ...func()) { + if !global.Suite.InRunPhase() { + exitIfErr(types.GinkgoErrors.ByNotDuringRunPhase(types.NewCodeLocation(1))) + } value := struct { Text string Duration time.Duration @@ -485,6 +523,11 @@ func By(text string, callback ...func()) { Text: text, } t := time.Now() + global.Suite.SetProgressStepCursor(internal.ProgressStepCursor{ + Text: text, + CodeLocation: types.NewCodeLocation(1), + StartTime: t, + }) AddReportEntry("By Step", ReportEntryVisibilityNever, Offset(1), &value, t) formatter := formatter.NewWithNoColorBool(reporterConfig.NoColor) GinkgoWriter.Println(formatter.F("{{bold}}STEP:{{/}} %s {{gray}}%s{{/}}", text, t.Format(types.GINKGO_TIME_FORMAT))) @@ -503,11 +546,15 @@ When running in parallel, each parallel process will call BeforeSuite. You may only register *one* BeforeSuite handler per test suite. You typically do so in your bootstrap file at the top level. +BeforeSuite can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + You cannot nest any other Ginkgo nodes within a BeforeSuite node's closure. You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite */ -func BeforeSuite(body func()) bool { - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeSuite, "", body)) +func BeforeSuite(body interface{}, args ...interface{}) bool { + combinedArgs := []interface{}{body} + combinedArgs = append(combinedArgs, args...) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeBeforeSuite, "", combinedArgs...)) } /* @@ -518,11 +565,15 @@ When running in parallel, each parallel process will call AfterSuite. You may only register *one* AfterSuite handler per test suite. You typically do so in your bootstrap file at the top level. +AfterSuite can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + You cannot nest any other Ginkgo nodes within an AfterSuite node's closure. You can learn more here: https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite */ -func AfterSuite(body func()) bool { - return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterSuite, "", body)) +func AfterSuite(body interface{}, args ...interface{}) bool { + combinedArgs := []interface{}{body} + combinedArgs = append(combinedArgs, args...) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeAfterSuite, "", combinedArgs...)) } /* @@ -533,19 +584,34 @@ information from that setup to all parallel processes. SynchronizedBeforeSuite accomplishes this by taking *two* function arguments and passing data between them. The first function is only run on parallel process #1. The second is run on all processes, but *only* after the first function completes successfully. 
The functions have the following signatures: -The first function (which only runs on process #1) has the signature: +The first function (which only runs on process #1) can have any of the following the signatures: + func() + func(ctx context.Context) + func(ctx SpecContext) func() []byte + func(ctx context.Context) []byte + func(ctx SpecContext) []byte -The byte array returned by the first function is then passed to the second function, which has the signature: +The byte array returned by the first function (if present) is then passed to the second function, which can have any of the following signature: + func() + func(ctx context.Context) + func(ctx SpecContext) func(data []byte) + func(ctx context.Context, data []byte) + func(ctx SpecContext, data []byte) + +If either function receives a context.Context/SpecContext it is considered interruptible. You cannot nest any other Ginkgo nodes within an SynchronizedBeforeSuite node's closure. You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite */ -func SynchronizedBeforeSuite(process1Body func() []byte, allProcessBody func([]byte)) bool { - return pushNode(internal.NewSynchronizedBeforeSuiteNode(process1Body, allProcessBody, types.NewCodeLocation(1))) +func SynchronizedBeforeSuite(process1Body interface{}, allProcessBody interface{}, args ...interface{}) bool { + combinedArgs := []interface{}{process1Body, allProcessBody} + combinedArgs = append(combinedArgs, args...) + + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedBeforeSuite, "", combinedArgs...)) } /* @@ -554,21 +620,26 @@ and a piece that must only run once - on process #1. SynchronizedAfterSuite accomplishes this by taking *two* function arguments. The first runs on all processes. The second runs only on parallel process #1 and *only* after all other processes have finished and exited. This ensures that process #1, and any resources it is managing, remain alive until -all other processes are finished. +all other processes are finished. These two functions can be bare functions (func()) or interruptible (func(context.Context)/func(SpecContext)) Note that you can also use DeferCleanup() in SynchronizedBeforeSuite to accomplish similar results. You cannot nest any other Ginkgo nodes within an SynchronizedAfterSuite node's closure. You can learn more, and see some examples, here: https://onsi.github.io/ginkgo/#parallel-suite-setup-and-cleanup-synchronizedbeforesuite-and-synchronizedaftersuite */ -func SynchronizedAfterSuite(allProcessBody func(), process1Body func()) bool { - return pushNode(internal.NewSynchronizedAfterSuiteNode(allProcessBody, process1Body, types.NewCodeLocation(1))) +func SynchronizedAfterSuite(allProcessBody interface{}, process1Body interface{}, args ...interface{}) bool { + combinedArgs := []interface{}{allProcessBody, process1Body} + combinedArgs = append(combinedArgs, args...) + + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeSynchronizedAfterSuite, "", combinedArgs...)) } /* BeforeEach nodes are Setup nodes whose closures run before It node closures. When multiple BeforeEach nodes are defined in nested Container nodes the outermost BeforeEach node closures are run first. +BeforeEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + You cannot nest any other Ginkgo nodes within a BeforeEach node's closure. 
You can learn more here: https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach */ @@ -580,6 +651,8 @@ func BeforeEach(args ...interface{}) bool { JustBeforeEach nodes are similar to BeforeEach nodes, however they are guaranteed to run *after* all BeforeEach node closures - just before the It node closure. This can allow you to separate configuration from creation of resources for a spec. +JustBeforeEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + You cannot nest any other Ginkgo nodes within a JustBeforeEach node's closure. You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-creation-and-configuration-justbeforeeach */ @@ -593,6 +666,8 @@ are defined in nested Container nodes the innermost AfterEach node closures are Note that you can also use DeferCleanup() in other Setup or Subject nodes to accomplish similar results. +AfterEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + You cannot nest any other Ginkgo nodes within an AfterEach node's closure. You can learn more here: https://onsi.github.io/ginkgo/#spec-cleanup-aftereach-and-defercleanup */ @@ -603,6 +678,8 @@ func AfterEach(args ...interface{}) bool { /* JustAfterEach nodes are similar to AfterEach nodes, however they are guaranteed to run *before* all AfterEach node closures - just after the It node closure. This can allow you to separate diagnostics collection from teardown for a spec. +JustAfterEach can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + You cannot nest any other Ginkgo nodes within a JustAfterEach node's closure. You can learn more and see some examples here: https://onsi.github.io/ginkgo/#separating-diagnostics-collection-and-teardown-justaftereach */ @@ -611,10 +688,12 @@ func JustAfterEach(args ...interface{}) bool { } /* -BeforeAll nodes are Setup nodes that can occur inside Ordered contaienrs. They run just once before any specs in the Ordered container run. +BeforeAll nodes are Setup nodes that can occur inside Ordered containers. They run just once before any specs in the Ordered container run. Multiple BeforeAll nodes can be defined in a given Ordered container however they cannot be nested inside any other container. +BeforeAll can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + You cannot nest any other Ginkgo nodes within a BeforeAll node's closure. You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers And you can learn more about BeforeAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall @@ -624,12 +703,14 @@ func BeforeAll(args ...interface{}) bool { } /* -AfterAll nodes are Setup nodes that can occur inside Ordered contaienrs. They run just once after all specs in the Ordered container have run. +AfterAll nodes are Setup nodes that can occur inside Ordered containers. They run just once after all specs in the Ordered container have run. Multiple AfterAll nodes can be defined in a given Ordered container however they cannot be nested inside any other container. Note that you can also use DeferCleanup() in a BeforeAll node to accomplish similar behavior. +AfterAll can take a func() body, or an interruptible func(SpecContext)/func(context.Context) body. + You cannot nest any other Ginkgo nodes within an AfterAll node's closure. 
You can learn more about Ordered Containers at: https://onsi.github.io/ginkgo/#ordered-containers And you can learn more about AfterAll at: https://onsi.github.io/ginkgo/#setup-in-ordered-containers-beforeall-and-afterall @@ -643,16 +724,33 @@ DeferCleanup can be called within any Setup or Subject node to register a cleanu DeferCleanup can be passed: 1. A function that takes no arguments and returns no values. -2. A function that returns an error (in which case it will assert that the returned error was nil, or it will fail the spec). -3. A function that takes arguments (and optionally returns an error) followed by a list of arguments to passe to the function. For example: +2. A function that returns multiple values. `DeferCleanup` will ignore all these return values except for the last one. If this last return value is a non-nil error `DeferCleanup` will fail the spec). +3. A function that takes a context.Context or SpecContext (and optionally returns multiple values). The resulting cleanup node is deemed interruptible and the passed-in context will be cancelled in the event of a timeout or interrupt. +4. A function that takes arguments (and optionally returns multiple values) followed by a list of arguments to pass to the function. +5. A function that takes SpecContext and a list of arguments (and optionally returns multiple values) followed by a list of arguments to pass to the function. - BeforeEach(func() { - DeferCleanup(os.SetEnv, "FOO", os.GetEnv("FOO")) - os.SetEnv("FOO", "BAR") - }) +For example: + + BeforeEach(func() { + DeferCleanup(os.SetEnv, "FOO", os.GetEnv("FOO")) + os.SetEnv("FOO", "BAR") + }) will register a cleanup handler that will set the environment variable "FOO" to it's current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec. +Similarly: + + BeforeEach(func() { + DeferCleanup(func(ctx SpecContext, path) { + req, err := http.NewRequestWithContext(ctx, "POST", path, nil) + Expect(err).NotTo(HaveOccured()) + _, err := http.DefaultClient.Do(req) + Expect(err).NotTo(HaveOccured()) + }, "example.com/cleanup", NodeTimeout(time.Second*3)) + }) + +will register a cleanup handler that will have three seconds to successfully complete a request to the specified path. Note that we do not specify a context in the list of arguments passed to DeferCleanup - only in the signature of the function we pass in. Ginkgo will detect the requested context and supply a SpecContext when it invokes the cleanup node. If you want to pass in your own context in addition to the Ginkgo-provided SpecContext you must specify the SpecContext as the first argument (e.g. func(ctx SpecContext, otherCtx context.Context)). + When DeferCleanup is called in BeforeEach, JustBeforeEach, It, AfterEach, or JustAfterEach the registered callback will be invoked when the spec completes (i.e. it will behave like an AfterEach node) When DeferCleanup is called in BeforeAll or AfterAll the registered callback will be invoked when the ordered container completes (i.e. it will behave like an AfterAll node) When DeferCleanup is called in BeforeSuite, SynchronizedBeforeSuite, AfterSuite, or SynchronizedAfterSuite the registered callback will be invoked when the suite completes (i.e. 
it will behave like an AfterSuite node) @@ -664,5 +762,5 @@ func DeferCleanup(args ...interface{}) { fail := func(message string, cl types.CodeLocation) { global.Failer.Fail(message, cl) } - pushNode(internal.NewCleanupNode(fail, args...)) + pushNode(internal.NewCleanupNode(deprecationTracker, fail, args...)) } diff --git a/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go index f23e526f3..e43d9cbbb 100644 --- a/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go @@ -13,13 +13,21 @@ You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorat type Offset = internal.Offset /* -FlakeAttempts(uint N) is a decorator that allows you to mark individual specs or spec containers as flaky. Ginkgo will run them up to `N` times until they pass. +FlakeAttempts(uint N) is a decorator that allows you to mark individual specs or spec containers as flaky. Ginkgo will run them up to `N` times until they pass. -You can learn more here: https://onsi.github.io/ginkgo/#repeating-spec-runs-and-managing-flaky-specs +You can learn more here: https://onsi.github.io/ginkgo/#the-flakeattempts-decorator You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference */ type FlakeAttempts = internal.FlakeAttempts +/* +MustPassRepeatedly(uint N) is a decorator that allows you to repeat the execution of individual specs or spec containers. Ginkgo will run them up to `N` times until they fail. + +You can learn more here: https://onsi.github.io/ginkgo/#the-mustpassrepeatedly-decorator +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +type MustPassRepeatedly = internal.MustPassRepeatedly + /* Focus is a decorator that allows you to mark a spec or container as focused. Identical to FIt and FDescribe. @@ -59,7 +67,7 @@ OncePerOrdered is a decorator that allows you to mark outer BeforeEach, AfterEac per ordered context. Normally these setup nodes run around each individual spec, with OncePerOrdered they will run once around the set of specs in an ordered container. The behavior for non-Ordered containers/specs is unchanged. -You can learh more here: https://onsi.github.io/ginkgo/#setup-around-ordered-containers-the-onceperordered-decorator +You can learn more here: https://onsi.github.io/ginkgo/#setup-around-ordered-containers-the-onceperordered-decorator You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference */ const OncePerOrdered = internal.OncePerOrdered @@ -80,3 +88,46 @@ Labels are the type for spec Label decorators. Use Label(...) to construct Labe You can learn more here: https://onsi.github.io/ginkgo/#spec-labels */ type Labels = internal.Labels + +/* +PollProgressAfter allows you to override the configured value for --poll-progress-after for a particular node. + +Ginkgo will start emitting node progress if the node is still running after a duration of PollProgressAfter. This allows you to get quicker feedback about the state of a long-running spec. +*/ +type PollProgressAfter = internal.PollProgressAfter + +/* +PollProgressInterval allows you to override the configured value for --poll-progress-interval for a particular node. + +Once a node has been running for longer than PollProgressAfter Ginkgo will emit node progress periodically at an interval of PollProgresInterval. 
+*/ +type PollProgressInterval = internal.PollProgressInterval + +/* +NodeTimeout allows you to specify a timeout for an indivdiual node. The node cannot be a container and must be interruptible (i.e. it must be passed a function that accepts a SpecContext or context.Context). + +If the node does not exit within the specified NodeTimeout its context will be cancelled. The node wil then have a period of time controlled by the GracePeriod decorator (or global --grace-period command-line argument) to exit. If the node does not exit within GracePeriod Ginkgo will leak the node and proceed to any clean-up nodes associated with the current spec. +*/ +type NodeTimeout = internal.NodeTimeout + +/* +SpecTimeout allows you to specify a timeout for an indivdiual spec. SpecTimeout can only decorate interruptible It nodes. + +All nodes associated with the It node will need to complete before the SpecTimeout has elapsed. Individual nodes (e.g. BeforeEach) may be decorated with different NodeTimeouts - but these can only serve to provide a more stringent deadline for the node in question; they cannot extend the deadline past the SpecTimeout. + +If the spec does not complete within the specified SpecTimeout the currently running node will have its context cancelled. The node wil then have a period of time controlled by that node's GracePeriod decorator (or global --grace-period command-line argument) to exit. If the node does not exit within GracePeriod Ginkgo will leak the node and proceed to any clean-up nodes associated with the current spec. +*/ +type SpecTimeout = internal.SpecTimeout + +/* +GracePeriod denotes the period of time Ginkgo will wait for an interruptible node to exit once an interruption (whether due to a timeout or a user-invoked signal) has occurred. If both the global --grace-period cli flag and a GracePeriod decorator are specified the value in the decorator will take precedence. + +Nodes that do not finish within a GracePeriod will be leaked and Ginkgo will proceed to run subsequent nodes. In the event of a timeout, such leaks will be reported to the user. +*/ +type GracePeriod = internal.GracePeriod + +/* +SuppressProgressReporting is a decorator that allows you to disable progress reporting of a particular node. This is useful if `ginkgo -v -progress` is generating too much noise; particularly +if you have a `ReportAfterEach` node that is running for every skipped spec and is generating lots of progress reports. +*/ +const SuppressProgressReporting = internal.SuppressProgressReporting diff --git a/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go b/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go index d20e5a8ce..f912bbec6 100644 --- a/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/deprecated_dsl.go @@ -13,7 +13,7 @@ import ( Deprecated: Done Channel for asynchronous testing The Done channel pattern is no longer supported in Ginkgo 2.0. 
-See here for better patterns for asynchronouse testing: https://onsi.github.io/ginkgo/#patterns-for-asynchronous-testing +See here for better patterns for asynchronous testing: https://onsi.github.io/ginkgo/#patterns-for-asynchronous-testing For a migration guide see: https://onsi.github.io/ginkgo/MIGRATING_TO_V2#removed-async-testing */ diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/group.go b/vendor/github.com/onsi/ginkgo/v2/internal/group.go index c6546bba4..c76c3bd94 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/group.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/group.go @@ -118,6 +118,8 @@ func (g *group) initialReportForSpec(spec Spec) types.SpecReport { ParallelProcess: g.suite.config.ParallelProcess, IsSerial: spec.Nodes.HasNodeMarkedSerial(), IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(), + MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(), + MaxMustPassRepeatedly: spec.Nodes.GetMaxMustPassRepeatedly(), } } @@ -128,7 +130,10 @@ func (g *group) evaluateSkipStatus(spec Spec) (types.SpecState, types.Failure) { if spec.Skip { return types.SpecStateSkipped, types.Failure{} } - if g.suite.interruptHandler.Status().Interrupted || g.suite.skipAll { + if g.suite.interruptHandler.Status().Interrupted() || g.suite.skipAll { + return types.SpecStateSkipped, types.Failure{} + } + if !g.suite.deadline.IsZero() && g.suite.deadline.Before(time.Now()) { return types.SpecStateSkipped, types.Failure{} } if !g.succeeded { @@ -163,8 +168,6 @@ func (g *group) isLastSpecWithPair(specID uint, pair runOncePair) bool { } func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { - interruptStatus := g.suite.interruptHandler.Status() - pairs := g.runOncePairs[spec.SubjectID()] nodes := spec.Nodes.WithType(types.NodeTypeBeforeAll) @@ -173,12 +176,17 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { nodes = append(nodes, spec.Nodes.FirstNodeWithType(types.NodeTypeIt)) terminatingNode, terminatingPair := Node{}, runOncePair{} + deadline := time.Time{} + if spec.SpecTimeout() > 0 { + deadline = time.Now().Add(spec.SpecTimeout()) + } + for _, node := range nodes { oncePair := pairs.runOncePairFor(node.ID) if !oncePair.isZero() && g.runOnceTracker[oncePair].Is(types.SpecStatePassed) { continue } - g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.suite.runNode(node, interruptStatus.Channel, spec.Nodes.BestTextFor(node)) + g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.suite.runNode(node, deadline, spec.Nodes.BestTextFor(node)) g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime) if !oncePair.isZero() { g.runOnceTracker[oncePair] = g.suite.currentSpecReport.State @@ -260,11 +268,13 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { for _, node := range nodes { afterNodeWasRun[node.ID] = true - state, failure := g.suite.runNode(node, g.suite.interruptHandler.Status().Channel, spec.Nodes.BestTextFor(node)) + state, failure := g.suite.runNode(node, deadline, spec.Nodes.BestTextFor(node)) g.suite.currentSpecReport.RunTime = time.Since(g.suite.currentSpecReport.StartTime) if g.suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted { g.suite.currentSpecReport.State = state g.suite.currentSpecReport.Failure = failure + } else if state.Is(types.SpecStateFailureStates) { + g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, types.AdditionalFailure{State: state, Failure: failure}) } } 
includeDeferCleanups = true @@ -279,7 +289,10 @@ func (g *group) run(specs Specs) { } for _, spec := range g.specs { + g.suite.selectiveLock.Lock() g.suite.currentSpecReport = g.initialReportForSpec(spec) + g.suite.selectiveLock.Unlock() + g.suite.currentSpecReport.State, g.suite.currentSpecReport.Failure = g.evaluateSkipStatus(spec) g.suite.reporter.WillRun(g.suite.currentSpecReport) g.suite.reportEach(spec, types.NodeTypeReportBeforeEach) @@ -288,16 +301,29 @@ func (g *group) run(specs Specs) { g.suite.currentSpecReport.StartTime = time.Now() if !skip { - maxAttempts := max(1, spec.FlakeAttempts()) - if g.suite.config.FlakeAttempts > 0 { + + var maxAttempts = 1 + + if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 { + maxAttempts = max(1, spec.MustPassRepeatedly()) + } else if g.suite.config.FlakeAttempts > 0 { maxAttempts = g.suite.config.FlakeAttempts + g.suite.currentSpecReport.MaxFlakeAttempts = maxAttempts + } else if g.suite.currentSpecReport.MaxFlakeAttempts > 0 { + maxAttempts = max(1, spec.FlakeAttempts()) } + for attempt := 0; attempt < maxAttempts; attempt++ { g.suite.currentSpecReport.NumAttempts = attempt + 1 g.suite.writer.Truncate() g.suite.outputInterceptor.StartInterceptingOutput() if attempt > 0 { - fmt.Fprintf(g.suite.writer, "\nGinkgo: Attempt #%d Failed. Retrying...\n", attempt) + if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 { + fmt.Fprintf(g.suite.writer, "\nGinkgo: Attempt #%d Passed. Repeating...\n", attempt) + } + if g.suite.currentSpecReport.MaxFlakeAttempts > 0 { + fmt.Fprintf(g.suite.writer, "\nGinkgo: Attempt #%d Failed. Retrying...\n", attempt) + } } g.attemptSpec(attempt == maxAttempts-1, spec) @@ -307,8 +333,15 @@ func (g *group) run(specs Specs) { g.suite.currentSpecReport.CapturedGinkgoWriterOutput += string(g.suite.writer.Bytes()) g.suite.currentSpecReport.CapturedStdOutErr += g.suite.outputInterceptor.StopInterceptingAndReturnOutput() - if g.suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) { - break + if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 { + if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates | types.SpecStateSkipped) { + break + } + } + if g.suite.currentSpecReport.MaxFlakeAttempts > 0 { + if g.suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) { + break + } } } } @@ -318,227 +351,8 @@ func (g *group) run(specs Specs) { if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { g.succeeded = false } + g.suite.selectiveLock.Lock() g.suite.currentSpecReport = types.SpecReport{} - } -} - -func (g *group) oldRun(specs Specs) { - var suite = g.suite - nodeState := map[uint]types.SpecState{} - groupSucceeded := true - - indexOfLastSpecContainingNodeID := func(id uint) int { - lastIdx := -1 - for idx := range specs { - if specs[idx].Nodes.ContainsNodeID(id) && !specs[idx].Skip { - lastIdx = idx - } - } - return lastIdx - } - - for i, spec := range specs { - suite.currentSpecReport = types.SpecReport{ - ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(), - ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(), - ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(), - LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation, - LeafNodeType: types.NodeTypeIt, - LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text, - 
LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels), - ParallelProcess: suite.config.ParallelProcess, - IsSerial: spec.Nodes.HasNodeMarkedSerial(), - IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(), - } - - skip := spec.Skip - if spec.Nodes.HasNodeMarkedPending() { - skip = true - suite.currentSpecReport.State = types.SpecStatePending - } else { - if suite.interruptHandler.Status().Interrupted || suite.skipAll { - skip = true - } - if !groupSucceeded { - skip = true - suite.currentSpecReport.Failure = suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), - "Spec skipped because an earlier spec in an ordered container failed") - } - for _, node := range spec.Nodes.WithType(types.NodeTypeBeforeAll) { - if nodeState[node.ID] == types.SpecStateSkipped { - skip = true - suite.currentSpecReport.Failure = suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), - "Spec skipped because Skip() was called in BeforeAll") - break - } - } - if skip { - suite.currentSpecReport.State = types.SpecStateSkipped - } - } - - if suite.config.DryRun && !skip { - skip = true - suite.currentSpecReport.State = types.SpecStatePassed - } - - suite.reporter.WillRun(suite.currentSpecReport) - //send the spec report to any attached ReportBeforeEach blocks - this will update suite.currentSpecReport if failures occur in these blocks - suite.reportEach(spec, types.NodeTypeReportBeforeEach) - if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { - //the reportEach failed, skip this spec - skip = true - } - - suite.currentSpecReport.StartTime = time.Now() - maxAttempts := max(1, spec.FlakeAttempts()) - if suite.config.FlakeAttempts > 0 { - maxAttempts = suite.config.FlakeAttempts - } - - for attempt := 0; !skip && (attempt < maxAttempts); attempt++ { - suite.currentSpecReport.NumAttempts = attempt + 1 - suite.writer.Truncate() - suite.outputInterceptor.StartInterceptingOutput() - if attempt > 0 { - fmt.Fprintf(suite.writer, "\nGinkgo: Attempt #%d Failed. Retrying...\n", attempt) - } - isFinalAttempt := (attempt == maxAttempts-1) - - interruptStatus := suite.interruptHandler.Status() - deepestNestingLevelAttained := -1 - var nodes = spec.Nodes.WithType(types.NodeTypeBeforeAll).Filter(func(n Node) bool { - return nodeState[n.ID] != types.SpecStatePassed - }) - nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeBeforeEach)...).SortedByAscendingNestingLevel() - nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeJustBeforeEach).SortedByAscendingNestingLevel()...) - nodes = nodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeIt)...) - - var terminatingNode Node - for j := range nodes { - deepestNestingLevelAttained = max(deepestNestingLevelAttained, nodes[j].NestingLevel) - suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(nodes[j], interruptStatus.Channel, spec.Nodes.BestTextFor(nodes[j])) - suite.currentSpecReport.RunTime = time.Since(suite.currentSpecReport.StartTime) - nodeState[nodes[j].ID] = suite.currentSpecReport.State - if suite.currentSpecReport.State != types.SpecStatePassed { - terminatingNode = nodes[j] - break - } - } - - afterAllNodesThatRan := map[uint]bool{} - // pull out some shared code so we aren't repeating ourselves down below. 
this just runs after and cleanup nodes - runAfterAndCleanupNodes := func(nodes Nodes) { - for j := range nodes { - state, failure := suite.runNode(nodes[j], suite.interruptHandler.Status().Channel, spec.Nodes.BestTextFor(nodes[j])) - suite.currentSpecReport.RunTime = time.Since(suite.currentSpecReport.StartTime) - nodeState[nodes[j].ID] = state - if suite.currentSpecReport.State == types.SpecStatePassed || state == types.SpecStateAborted { - suite.currentSpecReport.State = state - suite.currentSpecReport.Failure = failure - if state != types.SpecStatePassed { - terminatingNode = nodes[j] - } - } - if nodes[j].NodeType.Is(types.NodeTypeAfterAll) { - afterAllNodesThatRan[nodes[j].ID] = true - } - } - } - - // pull out a helper that captures the logic of whether or not we should run a given After node. - // there is complexity here stemming from the fact that we allow nested ordered contexts and flakey retries - shouldRunAfterNode := func(n Node) bool { - if n.NodeType.Is(types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) { - return true - } - var id uint - if n.NodeType.Is(types.NodeTypeAfterAll) { - id = n.ID - if afterAllNodesThatRan[id] { //we've already run on this attempt. don't run again. - return false - } - } - if n.NodeType.Is(types.NodeTypeCleanupAfterAll) { - id = n.NodeIDWhereCleanupWasGenerated - } - isLastSpecWithNode := indexOfLastSpecContainingNodeID(id) == i - - switch suite.currentSpecReport.State { - case types.SpecStatePassed: //we've passed so far... - return isLastSpecWithNode //... and we're the last spec with this AfterNode, so we should run it - case types.SpecStateSkipped: //the spec was skipped by the user... - if isLastSpecWithNode { - return true //...we're the last spec, so we should run the AfterNode - } - if terminatingNode.NodeType.Is(types.NodeTypeBeforeAll) && terminatingNode.NestingLevel == n.NestingLevel { - return true //...or, a BeforeAll was skipped and it's at our nesting level, so our subgroup is going to skip - } - case types.SpecStateFailed, types.SpecStatePanicked: // the spec has failed... - if isFinalAttempt { - return true //...if this was the last attempt then we're the last spec to run and so the AfterNode should run - } - if terminatingNode.NodeType.Is(types.NodeTypeBeforeAll) { - //...we'll be rerunning a BeforeAll so we should cleanup after it if... - if n.NodeType.Is(types.NodeTypeAfterAll) && terminatingNode.NestingLevel == n.NestingLevel { - return true //we're at the same nesting level - } - if n.NodeType.Is(types.NodeTypeCleanupAfterAll) && terminatingNode.ID == n.NodeIDWhereCleanupWasGenerated { - return true //we're a DeferCleanup generated by it - } - } - if terminatingNode.NodeType.Is(types.NodeTypeAfterAll) { - //...we'll be rerunning an AfterAll so we should cleanup after it if... - if n.NodeType.Is(types.NodeTypeCleanupAfterAll) && terminatingNode.ID == n.NodeIDWhereCleanupWasGenerated { - return true //we're a DeferCleanup generated by it - } - } - case types.SpecStateInterrupted, types.SpecStateAborted: // ...we've been interrupted and/or aborted - return true //...that means the test run is over and we should clean up the stack. Run the AfterNode - } - return false - } - - // first pass - run all the JustAfterEach, Aftereach, and AfterAlls. Our shoudlRunAfterNode filter function will clean up the AfterAlls for us. 
- afterNodes := spec.Nodes.WithType(types.NodeTypeJustAfterEach).SortedByDescendingNestingLevel() - afterNodes = afterNodes.CopyAppend(spec.Nodes.WithType(types.NodeTypeAfterEach).CopyAppend(spec.Nodes.WithType(types.NodeTypeAfterAll)...).SortedByDescendingNestingLevel()...) - afterNodes = afterNodes.WithinNestingLevel(deepestNestingLevelAttained) - afterNodes = afterNodes.Filter(shouldRunAfterNode) - runAfterAndCleanupNodes(afterNodes) - - // second-pass perhaps we didn't run the AfterAlls but a state change due to an AfterEach now requires us to run the AfterAlls: - afterNodes = spec.Nodes.WithType(types.NodeTypeAfterAll).WithinNestingLevel(deepestNestingLevelAttained).Filter(shouldRunAfterNode) - runAfterAndCleanupNodes(afterNodes) - - // now we run any DeferCleanups - afterNodes = suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterEach).Reverse() - afterNodes = append(afterNodes, suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Filter(shouldRunAfterNode).Reverse()...) - runAfterAndCleanupNodes(afterNodes) - - // third-pass, perhaps a DeferCleanup failed and now we need to run the AfterAlls. - afterNodes = spec.Nodes.WithType(types.NodeTypeAfterAll).WithinNestingLevel(deepestNestingLevelAttained).Filter(shouldRunAfterNode) - runAfterAndCleanupNodes(afterNodes) - - // and finally - running AfterAlls may have generated some new DeferCleanup nodes, let's run them to finish up - afterNodes = suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterAll).Reverse().Filter(shouldRunAfterNode) - runAfterAndCleanupNodes(afterNodes) - - suite.currentSpecReport.EndTime = time.Now() - suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime) - suite.currentSpecReport.CapturedGinkgoWriterOutput += string(suite.writer.Bytes()) - suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() - - if suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) { - break - } - } - - //send the spec report to any attached ReportAfterEach blocks - this will update suite.currentSpecReport if failures occur in these blocks - suite.reportEach(spec, types.NodeTypeReportAfterEach) - suite.processCurrentSpecReport() - if suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { - groupSucceeded = false - } - suite.currentSpecReport = types.SpecReport{} + g.suite.selectiveLock.Unlock() } } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go index aca7d1c43..ac6f51040 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go @@ -1,39 +1,38 @@ package interrupt_handler import ( - "fmt" "os" "os/signal" - "runtime" "sync" "syscall" "time" - "github.com/onsi/ginkgo/v2/formatter" "github.com/onsi/ginkgo/v2/internal/parallel_support" ) -const TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION = 30 * time.Second -const TIMEOUT_REPEAT_INTERRUPT_FRACTION_OF_TIMEOUT = 10 const ABORT_POLLING_INTERVAL = 500 * time.Millisecond -const ABORT_REPEAT_INTERRUPT_DURATION = 30 * time.Second type InterruptCause uint const ( InterruptCauseInvalid InterruptCause = iota - InterruptCauseSignal - InterruptCauseTimeout InterruptCauseAbortByOtherProcess ) +type InterruptLevel uint + +const ( + InterruptLevelUninterrupted 
InterruptLevel = iota + InterruptLevelCleanupAndReport + InterruptLevelReportOnly + InterruptLevelBailOut +) + func (ic InterruptCause) String() string { switch ic { case InterruptCauseSignal: return "Interrupted by User" - case InterruptCauseTimeout: - return "Interrupted by Timeout" case InterruptCauseAbortByOtherProcess: return "Interrupted by Other Ginkgo Process" } @@ -41,37 +40,49 @@ func (ic InterruptCause) String() string { } type InterruptStatus struct { - Interrupted bool - Channel chan interface{} - Cause InterruptCause + Channel chan interface{} + Level InterruptLevel + Cause InterruptCause +} + +func (s InterruptStatus) Interrupted() bool { + return s.Level != InterruptLevelUninterrupted +} + +func (s InterruptStatus) Message() string { + return s.Cause.String() +} + +func (s InterruptStatus) ShouldIncludeProgressReport() bool { + return s.Cause != InterruptCauseAbortByOtherProcess } type InterruptHandlerInterface interface { Status() InterruptStatus - SetInterruptPlaceholderMessage(string) - ClearInterruptPlaceholderMessage() - InterruptMessageWithStackTraces() string } type InterruptHandler struct { - c chan interface{} - lock *sync.Mutex - interrupted bool - interruptPlaceholderMessage string - interruptCause InterruptCause - client parallel_support.Client - stop chan interface{} + c chan interface{} + lock *sync.Mutex + level InterruptLevel + cause InterruptCause + client parallel_support.Client + stop chan interface{} + signals []os.Signal } -func NewInterruptHandler(timeout time.Duration, client parallel_support.Client) *InterruptHandler { +func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler { + if len(signals) == 0 { + signals = []os.Signal{os.Interrupt, syscall.SIGTERM} + } handler := &InterruptHandler{ - c: make(chan interface{}), - lock: &sync.Mutex{}, - interrupted: false, - stop: make(chan interface{}), - client: client, + c: make(chan interface{}), + lock: &sync.Mutex{}, + stop: make(chan interface{}), + client: client, + signals: signals, } - handler.registerForInterrupts(timeout) + handler.registerForInterrupts() return handler } @@ -79,30 +90,22 @@ func (handler *InterruptHandler) Stop() { close(handler.stop) } -func (handler *InterruptHandler) registerForInterrupts(timeout time.Duration) { +func (handler *InterruptHandler) registerForInterrupts() { // os signal handling signalChannel := make(chan os.Signal, 1) - signal.Notify(signalChannel, os.Interrupt, syscall.SIGTERM) - - // timeout handling - var timeoutChannel <-chan time.Time - var timeoutTimer *time.Timer - if timeout > 0 { - timeoutTimer = time.NewTimer(timeout) - timeoutChannel = timeoutTimer.C - } + signal.Notify(signalChannel, handler.signals...) 
// cross-process abort handling - var abortChannel chan bool + var abortChannel chan interface{} if handler.client != nil { - abortChannel = make(chan bool) + abortChannel = make(chan interface{}) go func() { pollTicker := time.NewTicker(ABORT_POLLING_INTERVAL) for { select { case <-pollTicker.C: if handler.client.ShouldAbort() { - abortChannel <- true + close(abortChannel) pollTicker.Stop() return } @@ -114,55 +117,37 @@ func (handler *InterruptHandler) registerForInterrupts(timeout time.Duration) { }() } - // listen for any interrupt signals - // note that some (timeouts, cross-process aborts) will only trigger once - // for these we set up a ticker to keep interrupting the suite until it ends - // this ensures any `AfterEach` or `AfterSuite`s that get stuck cleaning up - // get interrupted eventually - go func() { + go func(abortChannel chan interface{}) { var interruptCause InterruptCause - var repeatChannel <-chan time.Time - var repeatTicker *time.Ticker for { select { case <-signalChannel: interruptCause = InterruptCauseSignal - case <-timeoutChannel: - interruptCause = InterruptCauseTimeout - repeatInterruptTimeout := timeout / time.Duration(TIMEOUT_REPEAT_INTERRUPT_FRACTION_OF_TIMEOUT) - if repeatInterruptTimeout > TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION { - repeatInterruptTimeout = TIMEOUT_REPEAT_INTERRUPT_MAXIMUM_DURATION - } - timeoutTimer.Stop() - repeatTicker = time.NewTicker(repeatInterruptTimeout) - repeatChannel = repeatTicker.C case <-abortChannel: interruptCause = InterruptCauseAbortByOtherProcess - repeatTicker = time.NewTicker(ABORT_REPEAT_INTERRUPT_DURATION) - repeatChannel = repeatTicker.C - case <-repeatChannel: - //do nothing, just interrupt again using the same interruptCause case <-handler.stop: - if timeoutTimer != nil { - timeoutTimer.Stop() - } - if repeatTicker != nil { - repeatTicker.Stop() - } signal.Stop(signalChannel) return } + abortChannel = nil + handler.lock.Lock() - handler.interruptCause = interruptCause - if handler.interruptPlaceholderMessage != "" { - fmt.Println(handler.interruptPlaceholderMessage) + oldLevel := handler.level + handler.cause = interruptCause + if handler.level == InterruptLevelUninterrupted { + handler.level = InterruptLevelCleanupAndReport + } else if handler.level == InterruptLevelCleanupAndReport { + handler.level = InterruptLevelReportOnly + } else if handler.level == InterruptLevelReportOnly { + handler.level = InterruptLevelBailOut + } + if handler.level != oldLevel { + close(handler.c) + handler.c = make(chan interface{}) } - handler.interrupted = true - close(handler.c) - handler.c = make(chan interface{}) handler.lock.Unlock() } - }() + }(abortChannel) } func (handler *InterruptHandler) Status() InterruptStatus { @@ -170,43 +155,8 @@ func (handler *InterruptHandler) Status() InterruptStatus { defer handler.lock.Unlock() return InterruptStatus{ - Interrupted: handler.interrupted, - Channel: handler.c, - Cause: handler.interruptCause, - } -} - -func (handler *InterruptHandler) SetInterruptPlaceholderMessage(message string) { - handler.lock.Lock() - defer handler.lock.Unlock() - - handler.interruptPlaceholderMessage = message -} - -func (handler *InterruptHandler) ClearInterruptPlaceholderMessage() { - handler.lock.Lock() - defer handler.lock.Unlock() - - handler.interruptPlaceholderMessage = "" -} - -func (handler *InterruptHandler) InterruptMessageWithStackTraces() string { - handler.lock.Lock() - out := fmt.Sprintf("%s\n\n", handler.interruptCause.String()) - defer handler.lock.Unlock() - if handler.interruptCause == 
InterruptCauseAbortByOtherProcess { - return out - } - out += "Here's a stack trace of all running goroutines:\n" - buf := make([]byte, 8192) - for { - n := runtime.Stack(buf, true) - if n < len(buf) { - buf = buf[:n] - break - } - buf = make([]byte, 2*len(buf)) + Level: handler.level, + Channel: handler.c, + Cause: handler.cause, } - out += formatter.Fi(1, "%s", string(buf)) - return out } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go index 289e4dde2..9eb835e9d 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/node.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go @@ -1,9 +1,11 @@ package internal import ( + "context" "fmt" "reflect" "sort" + "time" "sync" @@ -27,26 +29,38 @@ type Node struct { NodeType types.NodeType Text string - Body func() + Body func(SpecContext) CodeLocation types.CodeLocation NestingLevel int + HasContext bool - SynchronizedBeforeSuiteProc1Body func() []byte - SynchronizedBeforeSuiteAllProcsBody func([]byte) + SynchronizedBeforeSuiteProc1Body func(SpecContext) []byte + SynchronizedBeforeSuiteProc1BodyHasContext bool + SynchronizedBeforeSuiteAllProcsBody func(SpecContext, []byte) + SynchronizedBeforeSuiteAllProcsBodyHasContext bool - SynchronizedAfterSuiteAllProcsBody func() - SynchronizedAfterSuiteProc1Body func() + SynchronizedAfterSuiteAllProcsBody func(SpecContext) + SynchronizedAfterSuiteAllProcsBodyHasContext bool + SynchronizedAfterSuiteProc1Body func(SpecContext) + SynchronizedAfterSuiteProc1BodyHasContext bool ReportEachBody func(types.SpecReport) ReportAfterSuiteBody func(types.Report) - MarkedFocus bool - MarkedPending bool - MarkedSerial bool - MarkedOrdered bool - MarkedOncePerOrdered bool - FlakeAttempts int - Labels Labels + MarkedFocus bool + MarkedPending bool + MarkedSerial bool + MarkedOrdered bool + MarkedOncePerOrdered bool + MarkedSuppressProgressReporting bool + FlakeAttempts int + MustPassRepeatedly int + Labels Labels + PollProgressAfter time.Duration + PollProgressInterval time.Duration + NodeTimeout time.Duration + SpecTimeout time.Duration + GracePeriod time.Duration NodeIDWhereCleanupWasGenerated uint } @@ -57,17 +71,25 @@ type pendingType bool type serialType bool type orderedType bool type honorsOrderedType bool +type suppressProgressReporting bool const Focus = focusType(true) const Pending = pendingType(true) const Serial = serialType(true) const Ordered = orderedType(true) const OncePerOrdered = honorsOrderedType(true) +const SuppressProgressReporting = suppressProgressReporting(true) type FlakeAttempts uint +type MustPassRepeatedly uint type Offset uint type Done chan<- interface{} // Deprecated Done Channel for asynchronous testing type Labels []string +type PollProgressInterval time.Duration +type PollProgressAfter time.Duration +type NodeTimeout time.Duration +type SpecTimeout time.Duration +type GracePeriod time.Duration func UnionOfLabels(labels ...Labels) Labels { out := Labels{} @@ -114,10 +136,24 @@ func isDecoration(arg interface{}) bool { return true case t == reflect.TypeOf(OncePerOrdered): return true + case t == reflect.TypeOf(SuppressProgressReporting): + return true case t == reflect.TypeOf(FlakeAttempts(0)): return true + case t == reflect.TypeOf(MustPassRepeatedly(0)): + return true case t == reflect.TypeOf(Labels{}): return true + case t == reflect.TypeOf(PollProgressInterval(0)): + return true + case t == reflect.TypeOf(PollProgressAfter(0)): + return true + case t == reflect.TypeOf(NodeTimeout(0)): + return true + case t == 
reflect.TypeOf(SpecTimeout(0)): + return true + case t == reflect.TypeOf(GracePeriod(0)): + return true case t.Kind() == reflect.Slice && isSliceOfDecorations(arg): return true default: @@ -138,16 +174,23 @@ func isSliceOfDecorations(slice interface{}) bool { return true } +var contextType = reflect.TypeOf(new(context.Context)).Elem() +var specContextType = reflect.TypeOf(new(SpecContext)).Elem() + func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeType, text string, args ...interface{}) (Node, []error) { baseOffset := 2 node := Node{ - ID: UniqueNodeID(), - NodeType: nodeType, - Text: text, - Labels: Labels{}, - CodeLocation: types.NewCodeLocation(baseOffset), - NestingLevel: -1, + ID: UniqueNodeID(), + NodeType: nodeType, + Text: text, + Labels: Labels{}, + CodeLocation: types.NewCodeLocation(baseOffset), + NestingLevel: -1, + PollProgressAfter: -1, + PollProgressInterval: -1, + GracePeriod: -1, } + errors := []error{} appendError := func(err error) { if err != nil { @@ -176,7 +219,6 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy remainingArgs = []interface{}{} //now process the rest of the args for _, arg := range args { - switch t := reflect.TypeOf(arg); { case t == reflect.TypeOf(float64(0)): break //ignore deprecated timeouts @@ -205,11 +247,46 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy if !nodeType.Is(types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach | types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "OncePerOrdered")) } + case t == reflect.TypeOf(SuppressProgressReporting): + node.MarkedSuppressProgressReporting = bool(arg.(suppressProgressReporting)) + if nodeType.Is(types.NodeTypeContainer) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SuppressProgressReporting")) + } case t == reflect.TypeOf(FlakeAttempts(0)): node.FlakeAttempts = int(arg.(FlakeAttempts)) if !nodeType.Is(types.NodeTypesForContainerAndIt) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "FlakeAttempts")) } + case t == reflect.TypeOf(MustPassRepeatedly(0)): + node.MustPassRepeatedly = int(arg.(MustPassRepeatedly)) + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "MustPassRepeatedly")) + } + case t == reflect.TypeOf(PollProgressAfter(0)): + node.PollProgressAfter = time.Duration(arg.(PollProgressAfter)) + if nodeType.Is(types.NodeTypeContainer) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "PollProgressAfter")) + } + case t == reflect.TypeOf(PollProgressInterval(0)): + node.PollProgressInterval = time.Duration(arg.(PollProgressInterval)) + if nodeType.Is(types.NodeTypeContainer) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "PollProgressInterval")) + } + case t == reflect.TypeOf(NodeTimeout(0)): + node.NodeTimeout = time.Duration(arg.(NodeTimeout)) + if nodeType.Is(types.NodeTypeContainer) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "NodeTimeout")) + } + case t == reflect.TypeOf(SpecTimeout(0)): + node.SpecTimeout = time.Duration(arg.(SpecTimeout)) + if !nodeType.Is(types.NodeTypeIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SpecTimeout")) + } + 
case t == reflect.TypeOf(GracePeriod(0)): + node.GracePeriod = time.Duration(arg.(GracePeriod)) + if nodeType.Is(types.NodeTypeContainer) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "GracePeriod")) + } case t == reflect.TypeOf(Labels{}): if !nodeType.Is(types.NodeTypesForContainerAndIt) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Label")) @@ -223,23 +300,85 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy } } case t.Kind() == reflect.Func: - if node.Body != nil { - appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) - trackedFunctionError = true - break - } - isValid := (t.NumOut() == 0) && (t.NumIn() <= 1) && (t.NumIn() == 0 || t.In(0) == reflect.TypeOf(make(Done))) - if !isValid { - appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType)) - trackedFunctionError = true - break - } - if t.NumIn() == 0 { - node.Body = arg.(func()) + if nodeType.Is(types.NodeTypeContainer) { + if node.Body != nil { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + if t.NumOut() > 0 || t.NumIn() > 0 { + appendError(types.GinkgoErrors.InvalidBodyTypeForContainer(t, node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + body := arg.(func()) + node.Body = func(SpecContext) { body() } + } else if nodeType.Is(types.NodeTypeReportBeforeEach | types.NodeTypeReportAfterEach) { + if node.ReportEachBody == nil { + node.ReportEachBody = arg.(func(types.SpecReport)) + } else { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + } else if nodeType.Is(types.NodeTypeReportAfterSuite) { + if node.ReportAfterSuiteBody == nil { + node.ReportAfterSuiteBody = arg.(func(types.Report)) + } else { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + } else if nodeType.Is(types.NodeTypeSynchronizedBeforeSuite) { + if node.SynchronizedBeforeSuiteProc1Body != nil && node.SynchronizedBeforeSuiteAllProcsBody != nil { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + if node.SynchronizedBeforeSuiteProc1Body == nil { + body, hasContext := extractSynchronizedBeforeSuiteProc1Body(arg) + if body == nil { + appendError(types.GinkgoErrors.InvalidBodyTypeForSynchronizedBeforeSuiteProc1(t, node.CodeLocation)) + trackedFunctionError = true + } + node.SynchronizedBeforeSuiteProc1Body, node.SynchronizedBeforeSuiteProc1BodyHasContext = body, hasContext + } else if node.SynchronizedBeforeSuiteAllProcsBody == nil { + body, hasContext := extractSynchronizedBeforeSuiteAllProcsBody(arg) + if body == nil { + appendError(types.GinkgoErrors.InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs(t, node.CodeLocation)) + trackedFunctionError = true + } + node.SynchronizedBeforeSuiteAllProcsBody, node.SynchronizedBeforeSuiteAllProcsBodyHasContext = body, hasContext + } + } else if nodeType.Is(types.NodeTypeSynchronizedAfterSuite) { + if node.SynchronizedAfterSuiteAllProcsBody != nil && node.SynchronizedAfterSuiteProc1Body != nil { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + body, hasContext := extractBodyFunction(deprecationTracker, node.CodeLocation, arg) + if body == nil { + 
appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + if node.SynchronizedAfterSuiteAllProcsBody == nil { + node.SynchronizedAfterSuiteAllProcsBody, node.SynchronizedAfterSuiteAllProcsBodyHasContext = body, hasContext + } else if node.SynchronizedAfterSuiteProc1Body == nil { + node.SynchronizedAfterSuiteProc1Body, node.SynchronizedAfterSuiteProc1BodyHasContext = body, hasContext + } } else { - deprecationTracker.TrackDeprecation(types.Deprecations.Async(), node.CodeLocation) - deprecatedAsyncBody := arg.(func(Done)) - node.Body = func() { deprecatedAsyncBody(make(Done)) } + if node.Body != nil { + appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } + node.Body, node.HasContext = extractBodyFunction(deprecationTracker, node.CodeLocation, arg) + if node.Body == nil { + appendError(types.GinkgoErrors.InvalidBodyType(t, node.CodeLocation, nodeType)) + trackedFunctionError = true + break + } } default: remainingArgs = append(remainingArgs, arg) @@ -251,13 +390,32 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType)) } - if node.Body == nil && !node.MarkedPending && !trackedFunctionError { + hasContext := node.HasContext || node.SynchronizedAfterSuiteProc1BodyHasContext || node.SynchronizedAfterSuiteAllProcsBodyHasContext || node.SynchronizedBeforeSuiteProc1BodyHasContext || node.SynchronizedBeforeSuiteAllProcsBodyHasContext + + if !hasContext && (node.NodeTimeout > 0 || node.SpecTimeout > 0 || node.GracePeriod > 0) && len(errors) == 0 { + appendError(types.GinkgoErrors.InvalidTimeoutOrGracePeriodForNonContextNode(node.CodeLocation, nodeType)) + } + + if !node.NodeType.Is(types.NodeTypeReportBeforeEach|types.NodeTypeReportAfterEach|types.NodeTypeSynchronizedBeforeSuite|types.NodeTypeSynchronizedAfterSuite|types.NodeTypeReportAfterSuite) && node.Body == nil && !node.MarkedPending && !trackedFunctionError { + appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType)) + } + + if node.NodeType.Is(types.NodeTypeSynchronizedBeforeSuite) && !trackedFunctionError && (node.SynchronizedBeforeSuiteProc1Body == nil || node.SynchronizedBeforeSuiteAllProcsBody == nil) { appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType)) } + + if node.NodeType.Is(types.NodeTypeSynchronizedAfterSuite) && !trackedFunctionError && (node.SynchronizedAfterSuiteProc1Body == nil || node.SynchronizedAfterSuiteAllProcsBody == nil) { + appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType)) + } + for _, arg := range remainingArgs { appendError(types.GinkgoErrors.UnknownDecorator(node.CodeLocation, nodeType, arg)) } + if node.FlakeAttempts > 0 && node.MustPassRepeatedly > 0 { + appendError(types.GinkgoErrors.InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(node.CodeLocation, nodeType)) + } + if len(errors) > 0 { return Node{}, errors } @@ -265,96 +423,157 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy return node, errors } -func NewSynchronizedBeforeSuiteNode(proc1Body func() []byte, allProcsBody func([]byte), codeLocation types.CodeLocation) (Node, []error) { - return Node{ - ID: UniqueNodeID(), - NodeType: types.NodeTypeSynchronizedBeforeSuite, - SynchronizedBeforeSuiteProc1Body: proc1Body, - SynchronizedBeforeSuiteAllProcsBody: allProcsBody, - 
CodeLocation: codeLocation, - }, nil -} - -func NewSynchronizedAfterSuiteNode(allProcsBody func(), proc1Body func(), codeLocation types.CodeLocation) (Node, []error) { - return Node{ - ID: UniqueNodeID(), - NodeType: types.NodeTypeSynchronizedAfterSuite, - SynchronizedAfterSuiteAllProcsBody: allProcsBody, - SynchronizedAfterSuiteProc1Body: proc1Body, - CodeLocation: codeLocation, - }, nil -} - -func NewReportBeforeEachNode(body func(types.SpecReport), codeLocation types.CodeLocation) (Node, []error) { - return Node{ - ID: UniqueNodeID(), - NodeType: types.NodeTypeReportBeforeEach, - ReportEachBody: body, - CodeLocation: codeLocation, - NestingLevel: -1, - }, nil -} - -func NewReportAfterEachNode(body func(types.SpecReport), codeLocation types.CodeLocation) (Node, []error) { - return Node{ - ID: UniqueNodeID(), - NodeType: types.NodeTypeReportAfterEach, - ReportEachBody: body, - CodeLocation: codeLocation, - NestingLevel: -1, - }, nil -} - -func NewReportAfterSuiteNode(text string, body func(types.Report), codeLocation types.CodeLocation) (Node, []error) { - return Node{ - ID: UniqueNodeID(), - Text: text, - NodeType: types.NodeTypeReportAfterSuite, - ReportAfterSuiteBody: body, - CodeLocation: codeLocation, - }, nil +var doneType = reflect.TypeOf(make(Done)) + +func extractBodyFunction(deprecationTracker *types.DeprecationTracker, cl types.CodeLocation, arg interface{}) (func(SpecContext), bool) { + t := reflect.TypeOf(arg) + if t.NumOut() > 0 || t.NumIn() > 1 { + return nil, false + } + if t.NumIn() == 1 { + if t.In(0) == doneType { + deprecationTracker.TrackDeprecation(types.Deprecations.Async(), cl) + deprecatedAsyncBody := arg.(func(Done)) + return func(SpecContext) { deprecatedAsyncBody(make(Done)) }, false + } else if t.In(0).Implements(specContextType) { + return arg.(func(SpecContext)), true + } else if t.In(0).Implements(contextType) { + body := arg.(func(context.Context)) + return func(c SpecContext) { body(c) }, true + } + + return nil, false + } + + body := arg.(func()) + return func(SpecContext) { body() }, false } -func NewCleanupNode(fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) { - baseOffset := 2 - node := Node{ - ID: UniqueNodeID(), - NodeType: types.NodeTypeCleanupInvalid, - CodeLocation: types.NewCodeLocation(baseOffset), - NestingLevel: -1, +var byteType = reflect.TypeOf([]byte{}) + +func extractSynchronizedBeforeSuiteProc1Body(arg interface{}) (func(SpecContext) []byte, bool) { + t := reflect.TypeOf(arg) + v := reflect.ValueOf(arg) + + if t.NumOut() > 1 || t.NumIn() > 1 { + return nil, false + } else if t.NumOut() == 1 && t.Out(0) != byteType { + return nil, false + } else if t.NumIn() == 1 && !t.In(0).Implements(contextType) { + return nil, false } - remainingArgs := []interface{}{} - for _, arg := range args { + hasContext := t.NumIn() == 1 + + return func(c SpecContext) []byte { + var out []reflect.Value + if hasContext { + out = v.Call([]reflect.Value{reflect.ValueOf(c)}) + } else { + out = v.Call([]reflect.Value{}) + } + if len(out) == 1 { + return (out[0].Interface()).([]byte) + } else { + return []byte{} + } + }, hasContext +} + +func extractSynchronizedBeforeSuiteAllProcsBody(arg interface{}) (func(SpecContext, []byte), bool) { + t := reflect.TypeOf(arg) + v := reflect.ValueOf(arg) + hasContext, hasByte := false, false + + if t.NumOut() > 0 || t.NumIn() > 2 { + return nil, false + } else if t.NumIn() == 2 && t.In(0).Implements(contextType) && t.In(1) == byteType { + hasContext, hasByte = true, true + } else if t.NumIn() == 1 && 
t.In(0).Implements(contextType) { + hasContext = true + } else if t.NumIn() == 1 && t.In(0) == byteType { + hasByte = true + } else if t.NumIn() != 0 { + return nil, false + } + + return func(c SpecContext, b []byte) { + in := []reflect.Value{} + if hasContext { + in = append(in, reflect.ValueOf(c)) + } + if hasByte { + in = append(in, reflect.ValueOf(b)) + } + v.Call(in) + }, hasContext +} + +var errInterface = reflect.TypeOf((*error)(nil)).Elem() + +func NewCleanupNode(deprecationTracker *types.DeprecationTracker, fail func(string, types.CodeLocation), args ...interface{}) (Node, []error) { + decorations, remainingArgs := PartitionDecorations(args...) + baseOffset := 2 + cl := types.NewCodeLocation(baseOffset) + finalArgs := []interface{}{} + for _, arg := range decorations { switch t := reflect.TypeOf(arg); { case t == reflect.TypeOf(Offset(0)): - node.CodeLocation = types.NewCodeLocation(baseOffset + int(arg.(Offset))) + cl = types.NewCodeLocation(baseOffset + int(arg.(Offset))) case t == reflect.TypeOf(types.CodeLocation{}): - node.CodeLocation = arg.(types.CodeLocation) + cl = arg.(types.CodeLocation) default: - remainingArgs = append(remainingArgs, arg) + finalArgs = append(finalArgs, arg) } } + finalArgs = append(finalArgs, cl) if len(remainingArgs) == 0 { - return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(node.CodeLocation)} + return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(cl)} } + callback := reflect.ValueOf(remainingArgs[0]) - if !(callback.Kind() == reflect.Func && callback.Type().NumOut() <= 1) { - return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(node.CodeLocation)} + if !(callback.Kind() == reflect.Func) { + return Node{}, []error{types.GinkgoErrors.DeferCleanupInvalidFunction(cl)} } + callArgs := []reflect.Value{} for _, arg := range remainingArgs[1:] { callArgs = append(callArgs, reflect.ValueOf(arg)) } - cl := node.CodeLocation - node.Body = func() { - out := callback.Call(callArgs) - if len(out) == 1 && !out[0].IsNil() { - fail(fmt.Sprintf("DeferCleanup callback returned error: %v", out[0]), cl) + + hasContext := false + t := callback.Type() + if t.NumIn() > 0 { + if t.In(0).Implements(specContextType) { + hasContext = true + } else if t.In(0).Implements(contextType) && (len(callArgs) == 0 || !callArgs[0].Type().Implements(contextType)) { + hasContext = true } } - return node, nil + handleFailure := func(out []reflect.Value) { + if len(out) == 0 { + return + } + last := out[len(out)-1] + if last.Type().Implements(errInterface) && !last.IsNil() { + fail(fmt.Sprintf("DeferCleanup callback returned error: %v", last), cl) + } + } + + if hasContext { + finalArgs = append(finalArgs, func(c SpecContext) { + out := callback.Call(append([]reflect.Value{reflect.ValueOf(c)}, callArgs...)) + handleFailure(out) + }) + } else { + finalArgs = append(finalArgs, func() { + out := callback.Call(callArgs) + handleFailure(out) + }) + } + + return NewNode(deprecationTracker, types.NodeTypeCleanupInvalid, "", finalArgs...) 
} func (n Node) IsZero() bool { @@ -642,6 +861,26 @@ func (n Nodes) FirstNodeMarkedOrdered() Node { return Node{} } +func (n Nodes) GetMaxFlakeAttempts() int { + maxFlakeAttempts := 0 + for i := range n { + if n[i].FlakeAttempts > 0 { + maxFlakeAttempts = n[i].FlakeAttempts + } + } + return maxFlakeAttempts +} + +func (n Nodes) GetMaxMustPassRepeatedly() int { + maxMustPassRepeatedly := 0 + for i := range n { + if n[i].MustPassRepeatedly > 0 { + maxMustPassRepeatedly = n[i].MustPassRepeatedly + } + } + return maxMustPassRepeatedly +} + func unrollInterfaceSlice(args interface{}) []interface{} { v := reflect.ValueOf(args) if v.Kind() != reflect.Slice { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go index f4723a54c..161be820c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go @@ -12,7 +12,7 @@ type SpecIndices []int func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, GroupedSpecIndices) { /* - Ginkgo has sophisticated suport for randomizing specs. Specs are guaranteed to have the same + Ginkgo has sophisticated support for randomizing specs. Specs are guaranteed to have the same order for a given seed across test runs. By default only top-level containers and specs are shuffled - this makes for a more intuitive debugging diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go index 6f7c386cc..4a1c09461 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor.go @@ -29,7 +29,7 @@ You have several options to fix this. In preferred order they are: 2. Ensure your process exits before the current spec completes. If your process is long-lived and must cross spec boundaries, this option won't work for you. -3. Pause Ginkgo's output interceptor befor starting your process and then +3. Pause Ginkgo's output interceptor before starting your process and then resume it after. Use PauseOutputInterception() and ResumeOutputInterception() to do this. 4. Set --output-interceptor-mode=none when running your Ginkgo suite. This will @@ -143,7 +143,7 @@ func (interceptor *genericOutputInterceptor) ResumeIntercepting() { go startPipeFactory(interceptor.pipeChannel, interceptor.shutdown) } - // Now we make a pipe, we'll use this to redirect the input to the 1 and 2 file descriptors (this is how everything else in the world is tring to log to stdout and stderr) + // Now we make a pipe, we'll use this to redirect the input to the 1 and 2 file descriptors (this is how everything else in the world is string to log to stdout and stderr) // we get the pipe from our pipe factory. it runs in the background so we can request the next pipe while the spec being intercepted is running interceptor.pipe = <-interceptor.pipeChannel @@ -192,7 +192,7 @@ func (interceptor *genericOutputInterceptor) PauseIntercepting() { should eventually receive an EOF and exit. **However**, if the user has spun up an external process and passed in os.Stdout/os.Stderr to cmd.Stdout/cmd.Stderr then the external process - will have a file descriptor pointing to the pipe writer's file desription and it will not close until the external process exits. + will have a file descriptor pointing to the pipe writer's file description and it will not close until the external process exits. 
That would leave us hanging here waiting for the io.Copy to close forever. Instead we invoke this emergency escape valve. This returns whatever content we've got but leaves the io.Copy running. This ensures the external process can continue writing without hanging at the cost of leaking a goroutine diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go index e875001c2..f5ae15b8b 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go @@ -28,7 +28,7 @@ func (impl *dupSyscallOutputInterceptorImpl) CreateStdoutStderrClones() (*os.Fil // And then wrap the clone file descriptors in files. // One benefit of this (that we don't use yet) is that we can actually write - // to these files to emit output to the console evne though we're intercepting output + // to these files to emit output to the console even though we're intercepting output stdoutClone := os.NewFile(uintptr(stdoutCloneFD), "stdout-clone") stderrClone := os.NewFile(uintptr(stderrCloneFD), "stderr-clone") diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go index 7d5cb0b63..b417bf5b3 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go @@ -49,6 +49,7 @@ type Client interface { FetchNextCounter() (int, error) PostAbort() error ShouldAbort() bool + PostEmitProgressReport(report types.ProgressReport) error Write(p []byte) (int, error) } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go index d076d5d1c..ad9932f2a 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go @@ -94,6 +94,10 @@ func (client *httpClient) PostSuiteDidEnd(report types.Report) error { return client.post("/suite-did-end", report) } +func (client *httpClient) PostEmitProgressReport(report types.ProgressReport) error { + return client.post("/progress-report", report) +} + func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error { beforeSuiteState := BeforeSuiteState{ State: state, diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go index ca1dcdca5..fa3ac682a 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go @@ -49,6 +49,7 @@ func (server *httpServer) Start() { mux.HandleFunc("/did-run", server.didRun) mux.HandleFunc("/suite-did-end", server.specSuiteDidEnd) mux.HandleFunc("/emit-output", server.emitOutput) + mux.HandleFunc("/progress-report", server.emitProgressReport) //synchronization endpoints mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted) @@ -155,6 +156,14 @@ func (server *httpServer) emitOutput(writer http.ResponseWriter, request *http.R server.handleError(server.handler.EmitOutput(output, &n), writer) } +func (server *httpServer) emitProgressReport(writer http.ResponseWriter, request *http.Request) { + var report 
types.ProgressReport + if !server.decode(writer, request, &report) { + return + } + server.handleError(server.handler.EmitProgressReport(report, voidReceiver), writer) +} + func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) { var beforeSuiteState BeforeSuiteState if !server.decode(writer, request, &beforeSuiteState) { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go index 4e83b0970..fe93cc2b9 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go @@ -72,6 +72,10 @@ func (client *rpcClient) Write(p []byte) (int, error) { return n, err } +func (client *rpcClient) PostEmitProgressReport(report types.ProgressReport) error { + return client.client.Call("Server.EmitProgressReport", report, voidReceiver) +} + func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error { beforeSuiteState := BeforeSuiteState{ State: state, diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go index ca471cf39..7c6e67b96 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go @@ -108,6 +108,13 @@ func (handler *ServerHandler) EmitOutput(output []byte, n *int) error { return err } +func (handler *ServerHandler) EmitProgressReport(report types.ProgressReport, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.reporter.EmitProgressReport(report) + return nil +} + func (handler *ServerHandler) registerAlive(proc int, alive func() bool) { handler.lock.Lock() defer handler.lock.Unlock() diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go new file mode 100644 index 000000000..345db544b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go @@ -0,0 +1,291 @@ +package internal + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "os" + "os/signal" + "path/filepath" + "runtime" + "strconv" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/types" +) + +var _SOURCE_CACHE = map[string][]string{} + +type ProgressSignalRegistrar func(func()) context.CancelFunc + +func RegisterForProgressSignal(handler func()) context.CancelFunc { + signalChannel := make(chan os.Signal, 1) + if len(PROGRESS_SIGNALS) > 0 { + signal.Notify(signalChannel, PROGRESS_SIGNALS...) 
+ } + ctx, cancel := context.WithCancel(context.Background()) + go func() { + for { + select { + case <-signalChannel: + handler() + case <-ctx.Done(): + signal.Stop(signalChannel) + return + } + } + }() + + return cancel +} + +type ProgressStepCursor struct { + Text string + CodeLocation types.CodeLocation + StartTime time.Time +} + +func NewProgressReport(isRunningInParallel bool, report types.SpecReport, currentNode Node, currentNodeStartTime time.Time, currentStep ProgressStepCursor, gwOutput string, additionalReports []string, sourceRoots []string, includeAll bool) (types.ProgressReport, error) { + pr := types.ProgressReport{ + ParallelProcess: report.ParallelProcess, + RunningInParallel: isRunningInParallel, + + Time: time.Now(), + + ContainerHierarchyTexts: report.ContainerHierarchyTexts, + LeafNodeText: report.LeafNodeText, + LeafNodeLocation: report.LeafNodeLocation, + SpecStartTime: report.StartTime, + + CurrentNodeType: currentNode.NodeType, + CurrentNodeText: currentNode.Text, + CurrentNodeLocation: currentNode.CodeLocation, + CurrentNodeStartTime: currentNodeStartTime, + + CurrentStepText: currentStep.Text, + CurrentStepLocation: currentStep.CodeLocation, + CurrentStepStartTime: currentStep.StartTime, + + AdditionalReports: additionalReports, + + CapturedGinkgoWriterOutput: gwOutput, + GinkgoWriterOffset: len(gwOutput), + } + + goroutines, err := extractRunningGoroutines() + if err != nil { + return pr, err + } + pr.Goroutines = goroutines + + // now we want to try to find goroutines of interest. these will be goroutines that have any function calls with code in packagesOfInterest: + packagesOfInterest := map[string]bool{} + packageFromFilename := func(filename string) string { + return filepath.Dir(filename) + } + addPackageFor := func(filename string) { + if filename != "" { + packagesOfInterest[packageFromFilename(filename)] = true + } + } + isPackageOfInterest := func(filename string) bool { + stackPackage := packageFromFilename(filename) + for packageOfInterest := range packagesOfInterest { + if strings.HasPrefix(stackPackage, packageOfInterest) { + return true + } + } + return false + } + for _, location := range report.ContainerHierarchyLocations { + addPackageFor(location.FileName) + } + addPackageFor(report.LeafNodeLocation.FileName) + addPackageFor(currentNode.CodeLocation.FileName) + addPackageFor(currentStep.CodeLocation.FileName) + + //First, we find the SpecGoroutine - this will be the goroutine that includes `runNode` + specGoRoutineIdx := -1 + runNodeFunctionCallIdx := -1 +OUTER: + for goroutineIdx, goroutine := range pr.Goroutines { + for functionCallIdx, functionCall := range goroutine.Stack { + if strings.Contains(functionCall.Function, "ginkgo/v2/internal.(*Suite).runNode.func") { + specGoRoutineIdx = goroutineIdx + runNodeFunctionCallIdx = functionCallIdx + break OUTER + } + } + } + + //Now, we find the first non-Ginkgo function call + if specGoRoutineIdx > -1 { + for runNodeFunctionCallIdx >= 0 { + fn := goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Function + file := goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Filename + // these are all things that could potentially happen from within ginkgo + if strings.Contains(fn, "ginkgo/v2/internal") || strings.Contains(fn, "reflect.Value") || strings.Contains(file, "ginkgo/table_dsl") || strings.Contains(file, "ginkgo/core_dsl") { + runNodeFunctionCallIdx-- + continue + } + if strings.Contains(goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Function, "ginkgo/table_dsl") { + + 
} + //found it! lets add its package of interest + addPackageFor(goroutines[specGoRoutineIdx].Stack[runNodeFunctionCallIdx].Filename) + break + } + } + + ginkgoEntryPointIdx := -1 +OUTER_GINKGO_ENTRY_POINT: + for goroutineIdx, goroutine := range pr.Goroutines { + for _, functionCall := range goroutine.Stack { + if strings.Contains(functionCall.Function, "ginkgo/v2.RunSpecs") { + ginkgoEntryPointIdx = goroutineIdx + break OUTER_GINKGO_ENTRY_POINT + } + } + } + + // Now we go through all goroutines and highlight any lines with packages in `packagesOfInterest` + // Any goroutines with highlighted lines end up in the HighlightGoRoutines + for goroutineIdx, goroutine := range pr.Goroutines { + if goroutineIdx == ginkgoEntryPointIdx { + continue + } + if goroutineIdx == specGoRoutineIdx { + pr.Goroutines[goroutineIdx].IsSpecGoroutine = true + } + for functionCallIdx, functionCall := range goroutine.Stack { + if isPackageOfInterest(functionCall.Filename) { + goroutine.Stack[functionCallIdx].Highlight = true + goroutine.Stack[functionCallIdx].Source, goroutine.Stack[functionCallIdx].SourceHighlight = fetchSource(functionCall.Filename, functionCall.Line, 2, sourceRoots) + } + } + } + + if !includeAll { + goroutines := []types.Goroutine{pr.SpecGoroutine()} + goroutines = append(goroutines, pr.HighlightedGoroutines()...) + pr.Goroutines = goroutines + } + + return pr, nil +} + +func extractRunningGoroutines() ([]types.Goroutine, error) { + var stack []byte + for size := 64 * 1024; ; size *= 2 { + stack = make([]byte, size) + if n := runtime.Stack(stack, true); n < size { + stack = stack[:n] + break + } + } + + r := bufio.NewReader(bytes.NewReader(stack)) + out := []types.Goroutine{} + idx := -1 + for { + line, err := r.ReadString('\n') + if err == io.EOF { + break + } + + line = strings.TrimSuffix(line, "\n") + + //skip blank lines + if line == "" { + continue + } + + //parse headers for new goroutine frames + if strings.HasPrefix(line, "goroutine") { + out = append(out, types.Goroutine{}) + idx = len(out) - 1 + + line = strings.TrimPrefix(line, "goroutine ") + line = strings.TrimSuffix(line, ":") + fields := strings.SplitN(line, " ", 2) + if len(fields) != 2 { + return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid goroutine frame header: %s", line)) + } + out[idx].ID, err = strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid goroutine ID: %s", fields[1])) + } + + out[idx].State = strings.TrimSuffix(strings.TrimPrefix(fields[1], "["), "]") + continue + } + + //if we are here we must be at a function call entry in the stack + functionCall := types.FunctionCall{ + Function: strings.TrimPrefix(line, "created by "), // no need to track 'created by' + } + + line, err = r.ReadString('\n') + line = strings.TrimSuffix(line, "\n") + if err == io.EOF { + return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call: %s -- missing file name and line number", functionCall.Function)) + } + line = strings.TrimLeft(line, " \t") + fields := strings.SplitN(line, ":", 2) + if len(fields) != 2 { + return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid filename nad line number: %s", line)) + } + functionCall.Filename = fields[0] + line = strings.Split(fields[1], " ")[0] + lineNumber, err := strconv.ParseInt(line, 10, 64) + functionCall.Line = int(lineNumber) + if err != nil { + return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call 
line number: %s\n%s", line, err.Error())) + } + out[idx].Stack = append(out[idx].Stack, functionCall) + } + + return out, nil +} + +func fetchSource(filename string, lineNumber int, span int, configuredSourceRoots []string) ([]string, int) { + if filename == "" { + return []string{}, 0 + } + + var lines []string + var ok bool + if lines, ok = _SOURCE_CACHE[filename]; !ok { + sourceRoots := []string{""} + sourceRoots = append(sourceRoots, configuredSourceRoots...) + var data []byte + var err error + var found bool + for _, root := range sourceRoots { + data, err = os.ReadFile(filepath.Join(root, filename)) + if err == nil { + found = true + break + } + } + if !found { + return []string{}, 0 + } + lines = strings.Split(string(data), "\n") + _SOURCE_CACHE[filename] = lines + } + + startIndex := lineNumber - span - 1 + endIndex := startIndex + span + span + 1 + if startIndex < 0 { + startIndex = 0 + } + if endIndex > len(lines) { + endIndex = len(lines) + } + highlightIndex := lineNumber - 1 - startIndex + return lines[startIndex:endIndex], highlightIndex +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go new file mode 100644 index 000000000..61e0ed306 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_bsd.go @@ -0,0 +1,11 @@ +//go:build freebsd || openbsd || netbsd || darwin || dragonfly +// +build freebsd openbsd netbsd darwin dragonfly + +package internal + +import ( + "os" + "syscall" +) + +var PROGRESS_SIGNALS = []os.Signal{syscall.SIGINFO, syscall.SIGUSR1} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go new file mode 100644 index 000000000..ad30de459 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_unix.go @@ -0,0 +1,11 @@ +//go:build linux || solaris +// +build linux solaris + +package internal + +import ( + "os" + "syscall" +) + +var PROGRESS_SIGNALS = []os.Signal{syscall.SIGUSR1} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go new file mode 100644 index 000000000..0eca2516a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report_win.go @@ -0,0 +1,8 @@ +//go:build windows +// +build windows + +package internal + +import "os" + +var PROGRESS_SIGNALS = []os.Signal{} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec.go index 92072edd4..7c4ee5bb7 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/spec.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/spec.go @@ -2,6 +2,7 @@ package internal import ( "strings" + "time" "github.com/onsi/ginkgo/v2/types" ) @@ -40,6 +41,21 @@ func (s Spec) FlakeAttempts() int { return flakeAttempts } +func (s Spec) MustPassRepeatedly() int { + mustPassRepeatedly := 0 + for i := range s.Nodes { + if s.Nodes[i].MustPassRepeatedly > 0 { + mustPassRepeatedly = s.Nodes[i].MustPassRepeatedly + } + } + + return mustPassRepeatedly +} + +func (s Spec) SpecTimeout() time.Duration { + return s.FirstNodeWithType(types.NodeTypeIt).SpecTimeout +} + type Specs []Spec func (s Specs) HasAnySpecsMarkedPending() bool { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go new file mode 100644 index 000000000..8f569dd35 --- /dev/null +++ 
b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go @@ -0,0 +1,90 @@ +package internal + +import ( + "context" + "sort" + "sync" + + "github.com/onsi/ginkgo/v2/types" +) + +type SpecContext interface { + context.Context + + SpecReport() types.SpecReport + AttachProgressReporter(func() string) func() +} + +type specContext struct { + context.Context + + cancel context.CancelFunc + lock *sync.Mutex + progressReporters map[int]func() string + prCounter int + + suite *Suite +} + +/* +SpecContext includes a reference to `suite` and embeds itself in itself as a "GINKGO_SPEC_CONTEXT" value. This allows users to create child Contexts without having down-stream consumers (e.g. Gomega) lose access to the SpecContext and its methods. This allows us to build extensions on top of Ginkgo that simply take an all-encompassing context. + +Note that while SpecContext is used to enforce deadlines by Ginkgo it is not configured as a context.WithDeadline. Instead, Ginkgo owns responsibility for cancelling the context when the deadline elapses. + +This is because Ginkgo needs finer control over when the context is canceled. Specifically, Ginkgo needs to generate a ProgressReport before it cancels the context to ensure progress is captured where the spec is currently running. The only way to avoid a race here is to manually control the cancellation. +*/ +func NewSpecContext(suite *Suite) *specContext { + ctx, cancel := context.WithCancel(context.Background()) + sc := &specContext{ + cancel: cancel, + suite: suite, + lock: &sync.Mutex{}, + prCounter: 0, + progressReporters: map[int]func() string{}, + } + ctx = context.WithValue(ctx, "GINKGO_SPEC_CONTEXT", sc) //yes, yes, the go docs say don't use a string for a key... but we'd rather avoid a circular dependency between Gomega and Ginkgo + sc.Context = ctx //thank goodness for garbage collectors that can handle circular dependencies + + return sc +} + +func (sc *specContext) SpecReport() types.SpecReport { + return sc.suite.CurrentSpecReport() +} + +func (sc *specContext) AttachProgressReporter(reporter func() string) func() { + sc.lock.Lock() + defer sc.lock.Unlock() + sc.prCounter += 1 + prCounter := sc.prCounter + sc.progressReporters[prCounter] = reporter + + return func() { + sc.lock.Lock() + defer sc.lock.Unlock() + delete(sc.progressReporters, prCounter) + } +} + +func (sc *specContext) QueryProgressReporters() []string { + sc.lock.Lock() + keys := []int{} + for key := range sc.progressReporters { + keys = append(keys, key) + } + sort.Ints(keys) + reporters := []func() string{} + for _, key := range keys { + reporters = append(reporters, sc.progressReporters[key]) + } + sc.lock.Unlock() + + if len(reporters) == 0 { + return nil + } + out := []string{} + for _, reporter := range reporters { + out = append(out, reporter()) + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index 8b0d68ce9..432bd217b 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -2,9 +2,9 @@ package internal import ( "fmt" + "sync" "time" - "github.com/onsi/ginkgo/v2/formatter" "github.com/onsi/ginkgo/v2/internal/interrupt_handler" "github.com/onsi/ginkgo/v2/internal/parallel_support" "github.com/onsi/ginkgo/v2/reporters" @@ -34,11 +34,29 @@ type Suite struct { outputInterceptor OutputInterceptor interruptHandler interrupt_handler.InterruptHandlerInterface config types.SuiteConfig + deadline time.Time - skipAll bool - report 
types.Report - currentSpecReport types.SpecReport - currentNode Node + skipAll bool + report types.Report + currentSpecReport types.SpecReport + currentNode Node + currentNodeStartTime time.Time + + currentSpecContext *specContext + + progressStepCursor ProgressStepCursor + + /* + We don't need to lock around all operations. Just those that *could* happen concurrently. + + Suite, generally, only runs one node at a time - and so the possibiity for races is small. In fact, the presence of a race usually indicates the user has launched a goroutine that has leaked past the node it was launched in. + + However, there are some operations that can happen concurrently: + + - AddReportEntry and CurrentSpecReport can be accessed at any point by the user - including in goroutines that outlive the node intentionally (see, e.g. #1020). They both form a self-contained read-write pair and so a lock in them is sufficent. + - generateProgressReport can be invoked at any point in time by an interrupt or a progres poll. Moreover, it requires access to currentSpecReport, currentNode, currentNodeStartTime, and progressStepCursor. To make it threadsafe we need to lock around generateProgressReport when we read those variables _and_ everywhere those variables are *written*. In general we don't need to worry about all possible field writes to these variables as what `generateProgressReport` does with these variables is fairly selective (hence the name of the lock). Specifically, we dont' need to lock around state and failure message changes on `currentSpecReport` - just the setting of the variable itself. + */ + selectiveLock *sync.Mutex client parallel_support.Client } @@ -47,6 +65,8 @@ func NewSuite() *Suite { return &Suite{ tree: &TreeNode{}, phase: PhaseBuildTopLevel, + + selectiveLock: &sync.Mutex{}, } } @@ -63,7 +83,7 @@ func (suite *Suite) BuildTree() error { return nil } -func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, suiteConfig types.SuiteConfig) (bool, bool) { +func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) { if suite.phase != PhaseBuildTree { panic("cannot run before building the tree = call suite.BuildTree() first") } @@ -80,11 +100,23 @@ func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string suite.interruptHandler = interruptHandler suite.config = suiteConfig + if suite.config.Timeout > 0 { + suite.deadline = time.Now().Add(suite.config.Timeout) + } + + cancelProgressHandler := progressSignalRegistrar(suite.handleProgressSignal) + success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs) + cancelProgressHandler() + return success, hasProgrammaticFocus } +func (suite *Suite) InRunPhase() bool { + return suite.phase == PhaseRun +} + /* Tree Construction methods @@ -139,7 +171,7 @@ func (suite *Suite) PushNode(node Node) error { err = types.GinkgoErrors.CaughtPanicDuringABuildPhase(e, node.CodeLocation) } }() - node.Body() + node.Body(nil) return err }() suite.tree = parentTree @@ -204,18 
+236,35 @@ func (suite *Suite) pushCleanupNode(node Node) error { return nil } +/* + Pushing and popping the Step Cursor stack +*/ + +func (suite *Suite) SetProgressStepCursor(cursor ProgressStepCursor) { + suite.selectiveLock.Lock() + defer suite.selectiveLock.Unlock() + + suite.progressStepCursor = cursor +} + /* Spec Running methods - used during PhaseRun */ func (suite *Suite) CurrentSpecReport() types.SpecReport { + suite.selectiveLock.Lock() + defer suite.selectiveLock.Unlock() report := suite.currentSpecReport if suite.writer != nil { report.CapturedGinkgoWriterOutput = string(suite.writer.Bytes()) } + report.ReportEntries = make([]ReportEntry, len(report.ReportEntries)) + copy(report.ReportEntries, suite.currentSpecReport.ReportEntries) return report } func (suite *Suite) AddReportEntry(entry ReportEntry) error { + suite.selectiveLock.Lock() + defer suite.selectiveLock.Unlock() if suite.phase != PhaseRun { return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location) } @@ -223,6 +272,45 @@ func (suite *Suite) AddReportEntry(entry ReportEntry) error { return nil } +func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport { + suite.selectiveLock.Lock() + defer suite.selectiveLock.Unlock() + + var additionalReports []string + if suite.currentSpecContext != nil { + additionalReports = suite.currentSpecContext.QueryProgressReporters() + } + stepCursor := suite.progressStepCursor + + gwOutput := suite.currentSpecReport.CapturedGinkgoWriterOutput + string(suite.writer.Bytes()) + pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, stepCursor, gwOutput, additionalReports, suite.config.SourceRoots, fullReport) + + if err != nil { + fmt.Printf("{{red}}Failed to generate progress report:{{/}}\n%s\n", err.Error()) + } + return pr +} + +func (suite *Suite) handleProgressSignal() { + report := suite.generateProgressReport(false) + report.Message = "{{bold}}You've requested a progress report:{{/}}" + suite.emitProgressReport(report) +} + +func (suite *Suite) emitProgressReport(report types.ProgressReport) { + suite.selectiveLock.Lock() + suite.currentSpecReport.ProgressReports = append(suite.currentSpecReport.ProgressReports, report.WithoutCapturedGinkgoWriterOutput()) + suite.selectiveLock.Unlock() + + suite.reporter.EmitProgressReport(report) + if suite.isRunningInParallel() { + err := suite.client.PostEmitProgressReport(report) + if err != nil { + fmt.Println(err.Error()) + } + } +} + func (suite *Suite) isRunningInParallel() bool { return suite.config.ParallelTotal > 1 } @@ -296,7 +384,7 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s // the complexity for running groups of specs is very high because of Ordered containers and FlakeAttempts // we encapsulate that complexity in the notion of a Group that can run // Group is really just an extension of suite so it gets passed a suite and has access to all its internals - // Note that group is stateful and intedned for single use! + // Note that group is stateful and intended for single use! 
newGroup(suite).run(specs.AtIndices(groupedSpecIndices[groupedSpecIdx])) } @@ -309,12 +397,16 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s suite.runAfterSuiteCleanup(numSpecsThatWillBeRun) interruptStatus := suite.interruptHandler.Status() - if interruptStatus.Interrupted { + if interruptStatus.Interrupted() { suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, interruptStatus.Cause.String()) suite.report.SuiteSucceeded = false } suite.report.EndTime = time.Now() suite.report.RunTime = suite.report.EndTime.Sub(suite.report.StartTime) + if !suite.deadline.IsZero() && suite.report.EndTime.After(suite.deadline) { + suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite Timeout Elapsed") + suite.report.SuiteSucceeded = false + } if suite.config.ParallelProcess == 1 { suite.runReportAfterSuite() @@ -328,16 +420,18 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s } func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) { - interruptStatus := suite.interruptHandler.Status() beforeSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeBeforeSuite | types.NodeTypeSynchronizedBeforeSuite) - if !beforeSuiteNode.IsZero() && !interruptStatus.Interrupted && numSpecsThatWillBeRun > 0 { + if !beforeSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 { + suite.selectiveLock.Lock() suite.currentSpecReport = types.SpecReport{ LeafNodeType: beforeSuiteNode.NodeType, LeafNodeLocation: beforeSuiteNode.CodeLocation, ParallelProcess: suite.config.ParallelProcess, } + suite.selectiveLock.Unlock() + suite.reporter.WillRun(suite.currentSpecReport) - suite.runSuiteNode(beforeSuiteNode, interruptStatus.Channel) + suite.runSuiteNode(beforeSuiteNode) if suite.currentSpecReport.State.Is(types.SpecStateSkipped) { suite.report.SpecialSuiteFailureReasons = append(suite.report.SpecialSuiteFailureReasons, "Suite skipped in BeforeSuite") suite.skipAll = true @@ -349,26 +443,32 @@ func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) { func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) { afterSuiteNode := suite.suiteNodes.FirstNodeWithType(types.NodeTypeAfterSuite | types.NodeTypeSynchronizedAfterSuite) if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 { + suite.selectiveLock.Lock() suite.currentSpecReport = types.SpecReport{ LeafNodeType: afterSuiteNode.NodeType, LeafNodeLocation: afterSuiteNode.CodeLocation, ParallelProcess: suite.config.ParallelProcess, } + suite.selectiveLock.Unlock() + suite.reporter.WillRun(suite.currentSpecReport) - suite.runSuiteNode(afterSuiteNode, suite.interruptHandler.Status().Channel) + suite.runSuiteNode(afterSuiteNode) suite.processCurrentSpecReport() } afterSuiteCleanup := suite.cleanupNodes.WithType(types.NodeTypeCleanupAfterSuite).Reverse() if len(afterSuiteCleanup) > 0 { for _, cleanupNode := range afterSuiteCleanup { + suite.selectiveLock.Lock() suite.currentSpecReport = types.SpecReport{ LeafNodeType: cleanupNode.NodeType, LeafNodeLocation: cleanupNode.CodeLocation, ParallelProcess: suite.config.ParallelProcess, } + suite.selectiveLock.Unlock() + suite.reporter.WillRun(suite.currentSpecReport) - suite.runSuiteNode(cleanupNode, suite.interruptHandler.Status().Channel) + suite.runSuiteNode(cleanupNode) suite.processCurrentSpecReport() } } @@ -376,12 +476,15 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) { func (suite *Suite) runReportAfterSuite() { for _, node := range 
suite.suiteNodes.WithType(types.NodeTypeReportAfterSuite) { + suite.selectiveLock.Lock() suite.currentSpecReport = types.SpecReport{ LeafNodeType: node.NodeType, LeafNodeLocation: node.CodeLocation, LeafNodeText: node.Text, ParallelProcess: suite.config.ParallelProcess, } + suite.selectiveLock.Unlock() + suite.reporter.WillRun(suite.currentSpecReport) suite.runReportAfterSuiteNode(node, suite.report) suite.processCurrentSpecReport() @@ -389,10 +492,6 @@ func (suite *Suite) runReportAfterSuite() { } func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) { - if suite.config.DryRun { - return - } - nodes := spec.Nodes.WithType(nodeType) if nodeType == types.NodeTypeReportAfterEach { nodes = nodes.SortedByDescendingNestingLevel() @@ -408,16 +507,11 @@ func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) { suite.writer.Truncate() suite.outputInterceptor.StartInterceptingOutput() report := suite.currentSpecReport - nodes[i].Body = func() { + nodes[i].Body = func(SpecContext) { nodes[i].ReportEachBody(report) } - suite.interruptHandler.SetInterruptPlaceholderMessage(formatter.Fiw(0, formatter.COLS, - "{{yellow}}Ginkgo received an interrupt signal but is currently running a %s node. To avoid an invalid report the %s node will not be interrupted however subsequent tests will be skipped.{{/}}\n\n{{bold}}The running %s node is at:\n%s.{{/}}", - nodeType, nodeType, nodeType, - nodes[i].CodeLocation, - )) - state, failure := suite.runNode(nodes[i], nil, spec.Nodes.BestTextFor(nodes[i])) - suite.interruptHandler.ClearInterruptPlaceholderMessage() + state, failure := suite.runNode(nodes[i], time.Time{}, spec.Nodes.BestTextFor(nodes[i])) + // If the spec is not in a failure state (i.e. it's Passed/Skipped/Pending) and the reporter has failed, override the state. 
// Also, if the reporter is ever aborted - always override the state to propagate the abort if (!suite.currentSpecReport.State.Is(types.SpecStateFailureStates) && state.Is(types.SpecStateFailureStates)) || state.Is(types.SpecStateAborted) { @@ -429,7 +523,7 @@ func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) { } } -func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) { +func (suite *Suite) runSuiteNode(node Node) { if suite.config.DryRun { suite.currentSpecReport.State = types.SpecStatePassed return @@ -442,13 +536,13 @@ func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) { var err error switch node.NodeType { case types.NodeTypeBeforeSuite, types.NodeTypeAfterSuite: - suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") case types.NodeTypeCleanupAfterSuite: if suite.config.ParallelTotal > 1 && suite.config.ParallelProcess == 1 { err = suite.client.BlockUntilNonprimaryProcsHaveFinished() } if err == nil { - suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") } case types.NodeTypeSynchronizedBeforeSuite: var data []byte @@ -458,8 +552,9 @@ func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) { suite.outputInterceptor.StopInterceptingAndReturnOutput() suite.outputInterceptor.StartInterceptingOutputAndForwardTo(suite.client) } - node.Body = func() { data = node.SynchronizedBeforeSuiteProc1Body() } - suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + node.Body = func(c SpecContext) { data = node.SynchronizedBeforeSuiteProc1Body(c) } + node.HasContext = node.SynchronizedBeforeSuiteProc1BodyHasContext + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") if suite.config.ParallelTotal > 1 { suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() suite.outputInterceptor.StartInterceptingOutput() @@ -476,19 +571,21 @@ func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) { switch proc1State { case types.SpecStatePassed: runAllProcs = true - case types.SpecStateFailed, types.SpecStatePanicked: + case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout: err = types.GinkgoErrors.SynchronizedBeforeSuiteFailedOnProc1() case types.SpecStateInterrupted, types.SpecStateAborted, types.SpecStateSkipped: suite.currentSpecReport.State = proc1State } } if runAllProcs { - node.Body = func() { node.SynchronizedBeforeSuiteAllProcsBody(data) } - suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + node.Body = func(c SpecContext) { node.SynchronizedBeforeSuiteAllProcsBody(c, data) } + node.HasContext = node.SynchronizedBeforeSuiteAllProcsBodyHasContext + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") } case types.NodeTypeSynchronizedAfterSuite: node.Body = node.SynchronizedAfterSuiteAllProcsBody - suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, interruptChannel, "") + node.HasContext = node.SynchronizedAfterSuiteAllProcsBodyHasContext + suite.currentSpecReport.State,
suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") if suite.config.ParallelProcess == 1 { if suite.config.ParallelTotal > 1 { err = suite.client.BlockUntilNonprimaryProcsHaveFinished() @@ -500,7 +597,8 @@ func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) { } node.Body = node.SynchronizedAfterSuiteProc1Body - state, failure := suite.runNode(node, interruptChannel, "") + node.HasContext = node.SynchronizedAfterSuiteProc1BodyHasContext + state, failure := suite.runNode(node, time.Time{}, "") if suite.currentSpecReport.State.Is(types.SpecStatePassed) { suite.currentSpecReport.State, suite.currentSpecReport.Failure = state, failure } @@ -521,11 +619,6 @@ func (suite *Suite) runSuiteNode(node Node, interruptChannel chan interface{}) { } func (suite *Suite) runReportAfterSuiteNode(node Node, report types.Report) { - if suite.config.DryRun { - suite.currentSpecReport.State = types.SpecStatePassed - return - } - suite.writer.Truncate() suite.outputInterceptor.StartInterceptingOutput() suite.currentSpecReport.StartTime = time.Now() @@ -539,13 +632,8 @@ func (suite *Suite) runReportAfterSuiteNode(node Node, report types.Report) { report = report.Add(aggregatedReport) } - node.Body = func() { node.ReportAfterSuiteBody(report) } - suite.interruptHandler.SetInterruptPlaceholderMessage(formatter.Fiw(0, formatter.COLS, - "{{yellow}}Ginkgo received an interrupt signal but is currently running a ReportAfterSuite node. To avoid an invalid report the ReportAfterSuite node will not be interrupted.{{/}}\n\n{{bold}}The running ReportAfterSuite node is at:\n%s.{{/}}", - node.CodeLocation, - )) - suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, nil, "") - suite.interruptHandler.ClearInterruptPlaceholderMessage() + node.Body = func(SpecContext) { node.ReportAfterSuiteBody(report) } + suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") suite.currentSpecReport.EndTime = time.Now() suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime) @@ -555,17 +643,35 @@ func (suite *Suite) runReportAfterSuiteNode(node Node, report types.Report) { return } -func (suite *Suite) runNode(node Node, interruptChannel chan interface{}, text string) (types.SpecState, types.Failure) { +func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) { if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll | types.NodeTypeCleanupAfterSuite) { suite.cleanupNodes = suite.cleanupNodes.WithoutNode(node) } + interruptStatus := suite.interruptHandler.Status() + if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut { + return types.SpecStateSkipped, types.Failure{} + } + if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly && !node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt) { + return types.SpecStateSkipped, types.Failure{} + } + if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport && !node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) { + return types.SpecStateSkipped, types.Failure{} + } + + suite.selectiveLock.Lock() suite.currentNode = node + suite.currentNodeStartTime = time.Now() + suite.progressStepCursor = ProgressStepCursor{} + suite.selectiveLock.Unlock() defer func() { + suite.selectiveLock.Lock() suite.currentNode = Node{} + suite.currentNodeStartTime = 
time.Time{} + suite.selectiveLock.Unlock() }() - if suite.config.EmitSpecProgress { + if suite.config.EmitSpecProgress && !node.MarkedSuppressProgressReporting { if text == "" { text = "TOP-LEVEL" } @@ -582,6 +688,49 @@ func (suite *Suite) runNode(node Node, interruptChannel chan interface{}, text s } else { failure.FailureNodeContext, failure.FailureNodeContainerIndex = types.FailureNodeInContainer, node.NestingLevel-1 } + var outcome types.SpecState + + gracePeriod := suite.config.GracePeriod + if node.GracePeriod >= 0 { + gracePeriod = node.GracePeriod + } + + now := time.Now() + deadline := suite.deadline + if deadline.IsZero() || (!specDeadline.IsZero() && specDeadline.Before(deadline)) { + deadline = specDeadline + } + if node.NodeTimeout > 0 && (deadline.IsZero() || deadline.Sub(now) > node.NodeTimeout) { + deadline = now.Add(node.NodeTimeout) + } + if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() { + //we're out of time already. let's wait for a NodeTimeout if we have it, or GracePeriod if we don't + if node.NodeTimeout > 0 { + deadline = now.Add(node.NodeTimeout) + } else { + deadline = now.Add(gracePeriod) + } + } + + if !node.HasContext { + // this maps onto the pre-context behavior: + // - an interrupted node exits immediately. with this, context-less nodes that are in a spec with a SpecTimeout and/or are interrupted by other means will simply exit immediately after the timeout/interrupt + // - clean up nodes have up to GracePeriod (formerly hard-coded at 30s) to complete before they are interrupted + gracePeriod = 0 + } + + sc := NewSpecContext(suite) + defer sc.cancel() + + suite.selectiveLock.Lock() + suite.currentSpecContext = sc + suite.selectiveLock.Unlock() + + var deadlineChannel <-chan time.Time + if !deadline.IsZero() { + deadlineChannel = time.After(deadline.Sub(now)) + } + var gracePeriodChannel <-chan time.Time outcomeC := make(chan types.SpecState) failureC := make(chan types.Failure) @@ -593,26 +742,125 @@ func (suite *Suite) runNode(node Node, interruptChannel chan interface{}, text s suite.failer.Panic(types.NewCodeLocationWithStackTrace(2), e) } - outcome, failureFromRun := suite.failer.Drain() - outcomeC <- outcome + outcomeFromRun, failureFromRun := suite.failer.Drain() + outcomeC <- outcomeFromRun failureC <- failureFromRun }() - node.Body() + node.Body(sc) finished = true }() - select { - case outcome := <-outcomeC: - failureFromRun := <-failureC - if outcome == types.SpecStatePassed { - return outcome, types.Failure{} + // progress polling timer and channel + var emitProgressNow <-chan time.Time + var progressPoller *time.Timer + var pollProgressAfter, pollProgressInterval = suite.config.PollProgressAfter, suite.config.PollProgressInterval + if node.PollProgressAfter >= 0 { + pollProgressAfter = node.PollProgressAfter + } + if node.PollProgressInterval >= 0 { + pollProgressInterval = node.PollProgressInterval + } + if pollProgressAfter > 0 { + progressPoller = time.NewTimer(pollProgressAfter) + emitProgressNow = progressPoller.C + defer progressPoller.Stop() + } + + // now we wait for an outcome, an interrupt, a timeout, or a progress poll + for { + select { + case outcomeFromRun := <-outcomeC: + failureFromRun := <-failureC + if outcome == types.SpecStateInterrupted { + // we've already been interrupted. we just managed to actually exit + // before the grace period elapsed + return outcome, failure + } else if outcome == types.SpecStateTimedout { + // we've already timed out. 
we just managed to actually exit + // before the grace period elapsed. if we have a failure message we should include it + if outcomeFromRun != types.SpecStatePassed { + failure.Location, failure.ForwardedPanic = failureFromRun.Location, failureFromRun.ForwardedPanic + failure.Message = "This spec timed out and reported the following failure after the timeout:\n\n" + failureFromRun.Message + } + return outcome, failure + } + if outcomeFromRun.Is(types.SpecStatePassed) { + return outcomeFromRun, types.Failure{} + } else { + failure.Message, failure.Location, failure.ForwardedPanic = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic + return outcomeFromRun, failure + } + case <-gracePeriodChannel: + if node.HasContext && outcome.Is(types.SpecStateTimedout) { + report := suite.generateProgressReport(false) + report.Message = "{{bold}}{{orange}}A running node failed to exit in time{{/}}\nGinkgo is moving on but a node has timed out and failed to exit before its grace period elapsed. The node has now leaked and is running in the background.\nHere's a current progress report:" + suite.emitProgressReport(report) + } + return outcome, failure + case <-deadlineChannel: + // we're out of time - the outcome is a timeout and we capture the failure and progress report + outcome = types.SpecStateTimedout + failure.Message, failure.Location = "Timedout", node.CodeLocation + failure.ProgressReport = suite.generateProgressReport(false).WithoutCapturedGinkgoWriterOutput() + failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the timeout occurred:{{/}}" + deadlineChannel = nil + // tell the spec to stop. it's important we generate the progress report first to make sure we capture where + // the spec is actually stuck + sc.cancel() + //and now we wait for the grace period + gracePeriodChannel = time.After(gracePeriod) + case <-interruptStatus.Channel: + interruptStatus = suite.interruptHandler.Status() + deadlineChannel = nil // don't worry about deadlines, time's up now + + if outcome == types.SpecStateInvalid { + outcome = types.SpecStateInterrupted + failure.Message, failure.Location = interruptStatus.Message(), node.CodeLocation + if interruptStatus.ShouldIncludeProgressReport() { + failure.ProgressReport = suite.generateProgressReport(true).WithoutCapturedGinkgoWriterOutput() + failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the interrupt was received:{{/}}" + } + } + + var report types.ProgressReport + if interruptStatus.ShouldIncludeProgressReport() { + report = suite.generateProgressReport(false) + } + + sc.cancel() + + if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut { + if interruptStatus.ShouldIncludeProgressReport() { + report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message()) + suite.emitProgressReport(report) + } + return outcome, failure + } + if interruptStatus.ShouldIncludeProgressReport() { + if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport { + report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs. 
{{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message()) + } else if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly { + report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes. {{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message()) + } + suite.emitProgressReport(report) + } + + if gracePeriodChannel == nil { + // we haven't given grace yet... so let's + gracePeriodChannel = time.After(gracePeriod) + } else { + // we've already given grace. time's up. now. + return outcome, failure + } + case <-emitProgressNow: + report := suite.generateProgressReport(false) + report.Message = "{{bold}}Automatically polling progress:{{/}}" + suite.emitProgressReport(report) + if pollProgressInterval > 0 { + progressPoller.Reset(pollProgressInterval) + } } - failure.Message, failure.Location, failure.ForwardedPanic = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic - return outcome, failure - case <-interruptChannel: - failure.Message, failure.Location = suite.interruptHandler.InterruptMessageWithStackTraces(), node.CodeLocation - return types.SpecStateInterrupted, failure } } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go index b02a90e2a..da21d3b06 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go @@ -5,6 +5,9 @@ import ( "fmt" "io" "sync" + + "github.com/go-logr/logr" + "github.com/go-logr/logr/funcr" ) type WriterMode uint @@ -21,7 +24,7 @@ type WriterInterface interface { Bytes() []byte } -//Writer impplements WriterInterface and GinkgoWriterInterface +//Writer implements WriterInterface and GinkgoWriterInterface type Writer struct { buffer *bytes.Buffer outWriter io.Writer @@ -101,3 +104,9 @@ func (w *Writer) Printf(format string, a ...interface{}) { func (w *Writer) Println(a ...interface{}) { fmt.Fprintln(w, a...) 
} + +func GinkgoLogrFunc(writer *Writer) logr.Logger { + return funcr.New(func(prefix, args string) { + writer.Printf("%s", args) + }, funcr.Options{}) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index f39802ffa..d09488c28 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -12,6 +12,7 @@ import ( "io" "runtime" "strings" + "time" "github.com/onsi/ginkgo/v2/formatter" "github.com/onsi/ginkgo/v2/types" @@ -134,9 +135,11 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { denoter = fmt.Sprintf("[%s]", report.LeafNodeType) } + highlightColor = r.highlightColorForState(report.State) + switch report.State { case types.SpecStatePassed: - highlightColor, succinctLocationBlock = "{{green}}", v.LT(types.VerbosityLevelVerbose) + succinctLocationBlock = v.LT(types.VerbosityLevelVerbose) emitGinkgoWriterOutput = (r.conf.AlwaysEmitGinkgoWriter || v.GTE(types.VerbosityLevelVerbose)) && hasGW if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { if v.GTE(types.VerbosityLevelVerbose) || hasStd || hasEmittableReports { @@ -146,7 +149,7 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { } } else { header, stream = denoter, true - if report.NumAttempts > 1 { + if report.NumAttempts > 1 && report.MaxFlakeAttempts > 1 { header, stream = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), false } if report.RunTime > r.conf.SlowSpecThreshold { @@ -157,7 +160,6 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { stream = false } case types.SpecStatePending: - highlightColor = "{{yellow}}" includeRuntime, emitGinkgoWriterOutput = false, false if v.Is(types.VerbosityLevelSuccinct) { header, stream = "P", true @@ -165,22 +167,26 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { header, succinctLocationBlock = "P [PENDING]", v.LT(types.VerbosityLevelVeryVerbose) } case types.SpecStateSkipped: - highlightColor = "{{cyan}}" if report.Failure.Message != "" || v.Is(types.VerbosityLevelVeryVerbose) { header = "S [SKIPPED]" } else { header, stream = "S", true } case types.SpecStateFailed: - highlightColor, header = "{{red}}", fmt.Sprintf("%s [FAILED]", denoter) + header = fmt.Sprintf("%s [FAILED]", denoter) + case types.SpecStateTimedout: + header = fmt.Sprintf("%s [TIMEDOUT]", denoter) case types.SpecStatePanicked: - highlightColor, header = "{{magenta}}", fmt.Sprintf("%s! [PANICKED]", denoter) + header = fmt.Sprintf("%s! [PANICKED]", denoter) case types.SpecStateInterrupted: - highlightColor, header = "{{orange}}", fmt.Sprintf("%s! [INTERRUPTED]", denoter) + header = fmt.Sprintf("%s! [INTERRUPTED]", denoter) case types.SpecStateAborted: - highlightColor, header = "{{coral}}", fmt.Sprintf("%s! [ABORTED]", denoter) + header = fmt.Sprintf("%s! 
[ABORTED]", denoter) } + if report.State.Is(types.SpecStateFailureStates) && report.MaxMustPassRepeatedly > 1 { + header, stream = fmt.Sprintf("%s DURING REPETITION #%d", header, report.NumAttempts), false + } // Emit stream and return if stream { r.emit(r.f(highlightColor + header + "{{/}}")) @@ -208,9 +214,7 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { //Emit Captured GinkgoWriter Output if emitGinkgoWriterOutput && hasGW { r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}")) - r.emitBlock(r.fi(2, "%s", report.CapturedGinkgoWriterOutput)) - r.emitBlock(r.fi(1, "{{gray}}<< End Captured GinkgoWriter Output{{/}}")) + r.emitGinkgoWriterOutput(1, report.CapturedGinkgoWriterOutput, 0) } if hasEmittableReports { @@ -232,28 +236,96 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { // Emit Failure Message if !report.Failure.IsZero() { r.emitBlock("\n") - r.emitBlock(r.fi(1, highlightColor+"%s{{/}}", report.Failure.Message)) - r.emitBlock(r.fi(1, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}}\n", report.Failure.FailureNodeType, report.Failure.Location)) - if report.Failure.ForwardedPanic != "" { - r.emitBlock("\n") - r.emitBlock(r.fi(1, highlightColor+"%s{{/}}", report.Failure.ForwardedPanic)) - } + r.EmitFailure(1, report.State, report.Failure, false) + } - if r.conf.FullTrace || report.Failure.ForwardedPanic != "" { + if len(report.AdditionalFailures) > 0 { + if v.GTE(types.VerbosityLevelVerbose) { r.emitBlock("\n") - r.emitBlock(r.fi(1, highlightColor+"Full Stack Trace{{/}}")) - r.emitBlock(r.fi(2, "%s", report.Failure.Location.FullStackTrace)) + r.emitBlock(r.fi(1, "{{bold}}There were additional failures detected after the initial failure:{{/}}")) + for i, additionalFailure := range report.AdditionalFailures { + r.EmitFailure(2, additionalFailure.State, additionalFailure.Failure, true) + if i < len(report.AdditionalFailures)-1 { + r.emitBlock(r.fi(2, "{{gray}}%s{{/}}", strings.Repeat("-", 10))) + } + } + } else { + r.emitBlock("\n") + r.emitBlock(r.fi(1, "{{bold}}There were additional failures detected after the initial failure. 
Here's a summary - for full details run Ginkgo in verbose mode:{{/}}")) + for _, additionalFailure := range report.AdditionalFailures { + r.emitBlock(r.fi(2, r.highlightColorForState(additionalFailure.State)+"[%s]{{/}} in [%s] at %s", + r.humanReadableState(additionalFailure.State), + additionalFailure.Failure.FailureNodeType, + additionalFailure.Failure.Location, + )) + } + } } r.emitDelimiter() } +func (r *DefaultReporter) highlightColorForState(state types.SpecState) string { + switch state { + case types.SpecStatePassed: + return "{{green}}" + case types.SpecStatePending: + return "{{yellow}}" + case types.SpecStateSkipped: + return "{{cyan}}" + case types.SpecStateFailed: + return "{{red}}" + case types.SpecStateTimedout: + return "{{orange}}" + case types.SpecStatePanicked: + return "{{magenta}}" + case types.SpecStateInterrupted: + return "{{orange}}" + case types.SpecStateAborted: + return "{{coral}}" + default: + return "{{gray}}" + } +} + +func (r *DefaultReporter) humanReadableState(state types.SpecState) string { + return strings.ToUpper(state.String()) +} + +func (r *DefaultReporter) EmitFailure(indent uint, state types.SpecState, failure types.Failure, includeState bool) { + highlightColor := r.highlightColorForState(state) + if includeState { + r.emitBlock(r.fi(indent, highlightColor+"[%s]{{/}}", r.humanReadableState(state))) + } + r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.Message)) + r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}}\n", failure.FailureNodeType, failure.Location)) + if failure.ForwardedPanic != "" { + r.emitBlock("\n") + r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic)) + } + + if r.conf.FullTrace || failure.ForwardedPanic != "" { + r.emitBlock("\n") + r.emitBlock(r.fi(indent, highlightColor+"Full Stack Trace{{/}}")) + r.emitBlock(r.fi(indent+1, "%s", failure.Location.FullStackTrace)) + } + + if !failure.ProgressReport.IsZero() { + r.emitBlock("\n") + r.emitProgressReport(indent, false, failure.ProgressReport) + } +} + func (r *DefaultReporter) SuiteDidEnd(report types.Report) { failures := report.SpecReports.WithState(types.SpecStateFailureStates) - if len(failures) > 1 { + if len(failures) > 0 { r.emitBlock("\n\n") - r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures))) + if len(failures) > 1 { + r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures))) + } else { + r.emitBlock(r.f("{{red}}{{bold}}Summarizing 1 Failure:{{/}}")) + } for _, specReport := range failures { highlightColor, heading := "{{red}}", "[FAIL]" switch specReport.State { @@ -261,6 +333,8 @@ func (r *DefaultReporter) SuiteDidEnd(report types.Report) { highlightColor, heading = "{{magenta}}", "[PANICKED!]" case types.SpecStateAborted: highlightColor, heading = "{{coral}}", "[ABORTED]" + case types.SpecStateTimedout: + highlightColor, heading = "{{orange}}", "[TIMEDOUT]" case types.SpecStateInterrupted: highlightColor, heading = "{{orange}}", "[INTERRUPTED]" } @@ -305,11 +379,169 @@ func (r *DefaultReporter) SuiteDidEnd(report types.Report) { if specs.CountOfFlakedSpecs() > 0 { r.emit(r.f("{{light-yellow}}{{bold}}%d Flaked{{/}} | ", specs.CountOfFlakedSpecs())) } + if specs.CountOfRepeatedSpecs() > 0 { + r.emit(r.f("{{light-yellow}}{{bold}}%d Repeated{{/}} | ", specs.CountOfRepeatedSpecs())) + } r.emit(r.f("{{yellow}}{{bold}}%d Pending{{/}} | ", specs.CountWithState(types.SpecStatePending))) r.emit(r.f("{{cyan}}{{bold}}%d Skipped{{/}}\n", 
specs.CountWithState(types.SpecStateSkipped))) } } +func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) { + r.emitDelimiter() + + if report.RunningInParallel { + r.emit(r.f("{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess)) + } + r.emitProgressReport(0, true, report) + r.emitDelimiter() +} + +func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) { + if report.Message != "" { + r.emitBlock(r.fi(indent, report.Message+"\n")) + indent += 1 + } + if report.LeafNodeText != "" { + subjectIndent := indent + if len(report.ContainerHierarchyTexts) > 0 { + r.emit(r.fi(indent, r.cycleJoin(report.ContainerHierarchyTexts, " "))) + r.emit(" ") + subjectIndent = 0 + } + r.emit(r.fi(subjectIndent, "{{bold}}{{orange}}%s{{/}} (Spec Runtime: %s)\n", report.LeafNodeText, report.Time.Sub(report.SpecStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.LeafNodeLocation)) + indent += 1 + } + if report.CurrentNodeType != types.NodeTypeInvalid { + r.emit(r.fi(indent, "In {{bold}}{{orange}}[%s]{{/}}", report.CurrentNodeType)) + if report.CurrentNodeText != "" && !report.CurrentNodeType.Is(types.NodeTypeIt) { + r.emit(r.f(" {{bold}}{{orange}}%s{{/}}", report.CurrentNodeText)) + } + + r.emit(r.f(" (Node Runtime: %s)\n", report.Time.Sub(report.CurrentNodeStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentNodeLocation)) + indent += 1 + } + if report.CurrentStepText != "" { + r.emit(r.fi(indent, "At {{bold}}{{orange}}[By Step] %s{{/}} (Step Runtime: %s)\n", report.CurrentStepText, report.Time.Sub(report.CurrentStepStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentStepLocation)) + indent += 1 + } + + if indent > 0 { + indent -= 1 + } + + if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" && (report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose)) { + r.emit("\n") + r.emitGinkgoWriterOutput(indent, report.CapturedGinkgoWriterOutput, 10) + } + + if !report.SpecGoroutine().IsZero() { + r.emit("\n") + r.emit(r.fi(indent, "{{bold}}{{underline}}Spec Goroutine{{/}}\n")) + r.emitGoroutines(indent, report.SpecGoroutine()) + } + + if len(report.AdditionalReports) > 0 { + r.emit("\n") + r.emitBlock(r.fi(indent, "{{gray}}Begin Additional Progress Reports >>{{/}}")) + for i, additionalReport := range report.AdditionalReports { + r.emit(r.fi(indent+1, additionalReport)) + if i < len(report.AdditionalReports)-1 { + r.emitBlock(r.fi(indent+1, "{{gray}}%s{{/}}", strings.Repeat("-", 10))) + } + } + r.emitBlock(r.fi(indent, "{{gray}}<< End Additional Progress Reports{{/}}")) + } + + highlightedGoroutines := report.HighlightedGoroutines() + if len(highlightedGoroutines) > 0 { + r.emit("\n") + r.emit(r.fi(indent, "{{bold}}{{underline}}Goroutines of Interest{{/}}\n")) + r.emitGoroutines(indent, highlightedGoroutines...) + } + + otherGoroutines := report.OtherGoroutines() + if len(otherGoroutines) > 0 { + r.emit("\n") + r.emit(r.fi(indent, "{{gray}}{{bold}}{{underline}}Other Goroutines{{/}}\n")) + r.emitGoroutines(indent, otherGoroutines...) 
+ } +} + +func (r *DefaultReporter) emitGinkgoWriterOutput(indent uint, output string, limit int) { + r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}")) + if limit == 0 { + r.emitBlock(r.fi(indent+1, "%s", output)) + } else { + lines := strings.Split(output, "\n") + if len(lines) <= limit { + r.emitBlock(r.fi(indent+1, "%s", output)) + } else { + r.emitBlock(r.fi(indent+1, "{{gray}}...{{/}}")) + for _, line := range lines[len(lines)-limit-1:] { + r.emitBlock(r.fi(indent+1, "%s", line)) + } + } + } + r.emitBlock(r.fi(indent, "{{gray}}<< End Captured GinkgoWriter Output{{/}}")) +} + +func (r *DefaultReporter) emitGoroutines(indent uint, goroutines ...types.Goroutine) { + for idx, g := range goroutines { + color := "{{gray}}" + if g.HasHighlights() { + color = "{{orange}}" + } + r.emit(r.fi(indent, color+"goroutine %d [%s]{{/}}\n", g.ID, g.State)) + for _, fc := range g.Stack { + if fc.Highlight { + r.emit(r.fi(indent, color+"{{bold}}> %s{{/}}\n", fc.Function)) + r.emit(r.fi(indent+2, color+"{{bold}}%s:%d{{/}}\n", fc.Filename, fc.Line)) + r.emitSource(indent+3, fc) + } else { + r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", fc.Function)) + r.emit(r.fi(indent+2, "{{gray}}%s:%d{{/}}\n", fc.Filename, fc.Line)) + } + } + + if idx+1 < len(goroutines) { + r.emit("\n") + } + } +} + +func (r *DefaultReporter) emitSource(indent uint, fc types.FunctionCall) { + lines := fc.Source + if len(lines) == 0 { + return + } + + lTrim := 100000 + for _, line := range lines { + lTrimLine := len(line) - len(strings.TrimLeft(line, " \t")) + if lTrimLine < lTrim && len(line) > 0 { + lTrim = lTrimLine + } + } + if lTrim == 100000 { + lTrim = 0 + } + + for idx, line := range lines { + if len(line) > lTrim { + line = line[lTrim:] + } + if idx == fc.SourceHighlight { + r.emit(r.fi(indent, "{{bold}}{{orange}}> %s{{/}}\n", line)) + } else { + r.emit(r.fi(indent, "| %s\n", line)) + } + } +} + /* Emitting to the writer */ func (r *DefaultReporter) emit(s string) { if len(s) > 0 { diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index 87556d03c..fcea6ab17 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -41,7 +41,7 @@ type JUnitTestSuites struct { type JUnitTestSuite struct { // Name maps onto the description of the test suite - maps onto Report.SuiteDescription Name string `xml:"name,attr"` - // Package maps onto the aboslute path to the test suite - maps onto Report.SuitePath + // Package maps onto the absolute path to the test suite - maps onto Report.SuitePath Package string `xml:"package,attr"` // Tests maps onto the total number of specs in the test suite (this includes any suite nodes such as BeforeSuite) Tests int `xml:"tests,attr"` @@ -110,7 +110,7 @@ type JUnitSkipped struct { } type JUnitError struct { - //Message maps onto the panic/exception thrown - equivalent to SpecReport.Failure.ForwardedPanic - or to "interupted" + //Message maps onto the panic/exception thrown - equivalent to SpecReport.Failure.ForwardedPanic - or to "interrupted" Message string `xml:"message,attr"` //Type is one of "panicked" or "interrupted" Type string `xml:"type,attr"` @@ -171,8 +171,8 @@ func GenerateJUnitReport(report types.Report, dst string) error { Classname: report.SuiteDescription, Status: spec.State.String(), Time: spec.RunTime.Seconds(), - SystemOut: systemOutForUnstructureReporters(spec), - SystemErr: 
spec.CapturedGinkgoWriterOutput, + SystemOut: systemOutForUnstructuredReporters(spec), + SystemErr: systemErrForUnstructuredReporters(spec), } suite.Tests += 1 @@ -191,28 +191,35 @@ func GenerateJUnitReport(report types.Report, dst string) error { test.Failure = &JUnitFailure{ Message: spec.Failure.Message, Type: "failed", - Description: fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace), + Description: failureDescriptionForUnstructuredReporters(spec), + } + suite.Failures += 1 + case types.SpecStateTimedout: + test.Failure = &JUnitFailure{ + Message: spec.Failure.Message, + Type: "timedout", + Description: failureDescriptionForUnstructuredReporters(spec), } suite.Failures += 1 case types.SpecStateInterrupted: test.Error = &JUnitError{ - Message: "interrupted", + Message: spec.Failure.Message, Type: "interrupted", - Description: spec.Failure.Message, + Description: failureDescriptionForUnstructuredReporters(spec), } suite.Errors += 1 case types.SpecStateAborted: test.Failure = &JUnitFailure{ Message: spec.Failure.Message, Type: "aborted", - Description: fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace), + Description: failureDescriptionForUnstructuredReporters(spec), } suite.Errors += 1 case types.SpecStatePanicked: test.Error = &JUnitError{ Message: spec.Failure.ForwardedPanic, Type: "panicked", - Description: fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace), + Description: failureDescriptionForUnstructuredReporters(spec), } suite.Errors += 1 } @@ -278,7 +285,51 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) return messages, f.Close() } -func systemOutForUnstructureReporters(spec types.SpecReport) string { +func failureDescriptionForUnstructuredReporters(spec types.SpecReport) string { + out := &strings.Builder{} + out.WriteString(spec.Failure.Location.String() + "\n") + out.WriteString(spec.Failure.Location.FullStackTrace) + if !spec.Failure.ProgressReport.IsZero() { + out.WriteString("\n") + NewDefaultReporter(types.ReporterConfig{NoColor: true}, out).EmitProgressReport(spec.Failure.ProgressReport) + } + if len(spec.AdditionalFailures) > 0 { + out.WriteString("\nThere were additional failures detected after the initial failure:\n") + for i, additionalFailure := range spec.AdditionalFailures { + NewDefaultReporter(types.ReporterConfig{NoColor: true}, out).EmitFailure(0, additionalFailure.State, additionalFailure.Failure, true) + if i < len(spec.AdditionalFailures)-1 { + out.WriteString("----------\n") + } + } + } + return out.String() +} + +func systemErrForUnstructuredReporters(spec types.SpecReport) string { + out := &strings.Builder{} + gw := spec.CapturedGinkgoWriterOutput + cursor := 0 + for _, pr := range spec.ProgressReports { + if cursor < pr.GinkgoWriterOffset { + if pr.GinkgoWriterOffset < len(gw) { + out.WriteString(gw[cursor:pr.GinkgoWriterOffset]) + cursor = pr.GinkgoWriterOffset + } else if cursor < len(gw) { + out.WriteString(gw[cursor:]) + cursor = len(gw) + } + } + NewDefaultReporter(types.ReporterConfig{NoColor: true}, out).EmitProgressReport(pr) + } + + if cursor < len(gw) { + out.WriteString(gw[cursor:]) + } + + return out.String() +} + +func systemOutForUnstructuredReporters(spec types.SpecReport) string { systemOut := spec.CapturedStdOutErr if len(spec.ReportEntries) > 0 { systemOut += "\nReport Entries:\n" diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go 
b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go index 29f84e7c1..f79f005db 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go @@ -9,11 +9,13 @@ type Reporter interface { WillRun(report types.SpecReport) DidRun(report types.SpecReport) SuiteDidEnd(report types.Report) + EmitProgressReport(progressReport types.ProgressReport) } type NoopReporter struct{} -func (n NoopReporter) SuiteWillBegin(report types.Report) {} -func (n NoopReporter) WillRun(report types.SpecReport) {} -func (n NoopReporter) DidRun(report types.SpecReport) {} -func (n NoopReporter) SuiteDidEnd(report types.Report) {} +func (n NoopReporter) SuiteWillBegin(report types.Report) {} +func (n NoopReporter) WillRun(report types.SpecReport) {} +func (n NoopReporter) DidRun(report types.SpecReport) {} +func (n NoopReporter) SuiteDidEnd(report types.Report) {} +func (n NoopReporter) EmitProgressReport(progressReport types.ProgressReport) {} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go index 2aa2f1845..c1863496d 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go @@ -17,12 +17,12 @@ import ( ) func tcEscape(s string) string { - s = strings.Replace(s, "|", "||", -1) - s = strings.Replace(s, "'", "|'", -1) - s = strings.Replace(s, "\n", "|n", -1) - s = strings.Replace(s, "\r", "|r", -1) - s = strings.Replace(s, "[", "|[", -1) - s = strings.Replace(s, "]", "|]", -1) + s = strings.ReplaceAll(s, "|", "||") + s = strings.ReplaceAll(s, "'", "|'") + s = strings.ReplaceAll(s, "\n", "|n") + s = strings.ReplaceAll(s, "\r", "|r") + s = strings.ReplaceAll(s, "[", "|[") + s = strings.ReplaceAll(s, "]", "|]") return s } @@ -60,20 +60,24 @@ func GenerateTeamcityReport(report types.Report, dst string) error { } fmt.Fprintf(f, "##teamcity[testIgnored name='%s' message='%s']\n", name, tcEscape(message)) case types.SpecStateFailed: - details := fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace) + details := failureDescriptionForUnstructuredReporters(spec) fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='failed - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) case types.SpecStatePanicked: - details := fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace) + details := failureDescriptionForUnstructuredReporters(spec) fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='panicked - %s' details='%s']\n", name, tcEscape(spec.Failure.ForwardedPanic), tcEscape(details)) + case types.SpecStateTimedout: + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='timedout - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) case types.SpecStateInterrupted: - fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='interrupted' details='%s']\n", name, tcEscape(spec.Failure.Message)) + details := failureDescriptionForUnstructuredReporters(spec) + fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='interrupted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) case types.SpecStateAborted: - details := fmt.Sprintf("%s\n%s", spec.Failure.Location.String(), spec.Failure.Location.FullStackTrace) + details := failureDescriptionForUnstructuredReporters(spec) 
fmt.Fprintf(f, "##teamcity[testFailed name='%s' message='aborted - %s' details='%s']\n", name, tcEscape(spec.Failure.Message), tcEscape(details)) } - fmt.Fprintf(f, "##teamcity[testStdOut name='%s' out='%s']\n", name, tcEscape(systemOutForUnstructureReporters(spec))) - fmt.Fprintf(f, "##teamcity[testStdErr name='%s' out='%s']\n", name, tcEscape(spec.CapturedGinkgoWriterOutput)) + fmt.Fprintf(f, "##teamcity[testStdOut name='%s' out='%s']\n", name, tcEscape(systemOutForUnstructuredReporters(spec))) + fmt.Fprintf(f, "##teamcity[testStdErr name='%s' out='%s']\n", name, tcEscape(systemErrForUnstructuredReporters(spec))) fmt.Fprintf(f, "##teamcity[testFinished name='%s' duration='%d']\n", name, int(spec.RunTime.Seconds()*1000.0)) } fmt.Fprintf(f, "##teamcity[testSuiteFinished name='%s']\n", tcEscape(report.SuiteDescription)) diff --git a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go index b35a6ed84..afc151b13 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go @@ -79,8 +79,11 @@ receives a SpecReport. They are called before the spec starts. You cannot nest any other Ginkgo nodes within a ReportBeforeEach node's closure. You can learn more about ReportBeforeEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically */ -func ReportBeforeEach(body func(SpecReport)) bool { - return pushNode(internal.NewReportBeforeEachNode(body, types.NewCodeLocation(1))) +func ReportBeforeEach(body func(SpecReport), args ...interface{}) bool { + combinedArgs := []interface{}{body} + combinedArgs = append(combinedArgs, args...) + + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeEach, "", combinedArgs...)) } /* @@ -90,8 +93,11 @@ receives a SpecReport. They are called after the spec has completed and receive You cannot nest any other Ginkgo nodes within a ReportAfterEach node's closure. You can learn more about ReportAfterEach here: https://onsi.github.io/ginkgo/#generating-reports-programmatically */ -func ReportAfterEach(body func(SpecReport)) bool { - return pushNode(internal.NewReportAfterEachNode(body, types.NewCodeLocation(1))) +func ReportAfterEach(body func(SpecReport), args ...interface{}) bool { + combinedArgs := []interface{}{body} + combinedArgs = append(combinedArgs, args...) + + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...)) } /* @@ -103,14 +109,16 @@ ReportAftersuite nodes must be created at the top-level (i.e. not nested in a Co When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportAfterSuite and that it is passed a report that is aggregated across all parallel nodes -In addition to using ReportAfterSuite to programatically generate suite reports, you can also generate JSON, JUnit, and Teamcity formatted reports using the --json-report, --junit-report, and --teamcity-report ginkgo CLI flags. +In addition to using ReportAfterSuite to programmatically generate suite reports, you can also generate JSON, JUnit, and Teamcity formatted reports using the --json-report, --junit-report, and --teamcity-report ginkgo CLI flags. You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure. 
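As a concrete usage sketch of the ReportAfterSuite DSL documented here (illustrative only, not part of the vendored diff; the package name and description string are hypothetical):

package suite_test

import (
	"fmt"

	. "github.com/onsi/ginkgo/v2"
)

// Illustrative sketch: a top-level ReportAfterSuite that walks the final
// report. In parallel runs Ginkgo invokes it on a single process and hands
// it the report aggregated across all parallel processes.
var _ = ReportAfterSuite("summarize spec states", func(report Report) {
	for _, spec := range report.SpecReports {
		fmt.Printf("%s | %s\n", spec.State, spec.FullText())
	}
})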
You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports */ -func ReportAfterSuite(text string, body func(Report)) bool { - return pushNode(internal.NewReportAfterSuiteNode(text, body, types.NewCodeLocation(1))) +func ReportAfterSuite(text string, body func(Report), args ...interface{}) bool { + combinedArgs := []interface{}{body} + combinedArgs = append(combinedArgs, args...) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterSuite, text, combinedArgs...)) } func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.ReporterConfig) { @@ -145,7 +153,8 @@ func registerReportAfterSuiteNodeForAutogeneratedReports(reporterConfig types.Re if reporterConfig.TeamcityReport != "" { flags = append(flags, "--teamcity-report") } - pushNode(internal.NewReportAfterSuiteNode( + pushNode(internal.NewNode( + deprecationTracker, types.NodeTypeReportAfterSuite, fmt.Sprintf("Autogenerated ReportAfterSuite for %s", strings.Join(flags, " ")), body, types.NewCustomCodeLocation("autogenerated by Ginkgo"), diff --git a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go index d3029f157..683674462 100644 --- a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go @@ -1,6 +1,7 @@ package ginkgo import ( + "context" "fmt" "reflect" "strings" @@ -89,6 +90,8 @@ Subsequent arguments accept any Ginkgo decorators. These are filtered out and t Each Entry ends up generating an individual Ginkgo It. The body of the it is the Table Body function with the Entry parameters passed in. +If you want to generate interruptible specs simply write a Table function that accepts a SpecContext as its first argument. You can then decorate individual Entrys with the NodeTimeout and SpecTimeout decorators. + You can learn more about Entry here: https://onsi.github.io/ginkgo/#table-specs */ func Entry(description interface{}, args ...interface{}) TableEntry { @@ -119,12 +122,16 @@ You can mark a particular entry as pending with XEntry. 
This is equivalent to X */ var XEntry = PEntry +var contextType = reflect.TypeOf(new(context.Context)).Elem() +var specContextType = reflect.TypeOf(new(SpecContext)).Elem() + func generateTable(description string, args ...interface{}) { cl := types.NewCodeLocation(2) containerNodeArgs := []interface{}{cl} entries := []TableEntry{} var itBody interface{} + var itBodyType reflect.Type var tableLevelEntryDescription interface{} tableLevelEntryDescription = func(args ...interface{}) string { @@ -135,8 +142,14 @@ func generateTable(description string, args ...interface{}) { return "Entry: " + strings.Join(out, ", ") } - for _, arg := range args { + if len(args) == 1 { + exitIfErr(types.GinkgoErrors.MissingParametersForTableFunction(cl)) + } + + for i, arg := range args { switch t := reflect.TypeOf(arg); { + case t == nil: + exitIfErr(types.GinkgoErrors.IncorrectParameterTypeForTable(i, "nil", cl)) case t == reflect.TypeOf(TableEntry{}): entries = append(entries, arg.(TableEntry)) case t == reflect.TypeOf([]TableEntry{}): @@ -150,6 +163,7 @@ func generateTable(description string, args ...interface{}) { exitIfErr(types.GinkgoErrors.MultipleEntryBodyFunctionsForTable(cl)) } itBody = arg + itBodyType = reflect.TypeOf(itBody) default: containerNodeArgs = append(containerNodeArgs, arg) } @@ -162,7 +176,7 @@ func generateTable(description string, args ...interface{}) { var description string switch t := reflect.TypeOf(entry.description); { case t == nil: - err = validateParameters(tableLevelEntryDescription, entry.parameters, "Entry Description function", entry.codeLocation) + err = validateParameters(tableLevelEntryDescription, entry.parameters, "Entry Description function", entry.codeLocation, false) if err == nil { description = invokeFunction(tableLevelEntryDescription, entry.parameters)[0].String() } @@ -171,7 +185,7 @@ case t == reflect.TypeOf(""): description = entry.description.(string) case t.Kind() == reflect.Func && t.NumOut() == 1 && t.Out(0) == reflect.TypeOf(""): - err = validateParameters(entry.description, entry.parameters, "Entry Description function", entry.codeLocation) + err = validateParameters(entry.description, entry.parameters, "Entry Description function", entry.codeLocation, false) if err == nil { description = invokeFunction(entry.description, entry.parameters)[0].String() } @@ -179,17 +193,37 @@ err = types.GinkgoErrors.InvalidEntryDescription(entry.codeLocation) } - if err == nil { - err = validateParameters(itBody, entry.parameters, "Table Body function", entry.codeLocation) - } itNodeArgs := []interface{}{entry.codeLocation} itNodeArgs = append(itNodeArgs, entry.decorations...) - itNodeArgs = append(itNodeArgs, func() { - if err != nil { - panic(err) + + hasContext := false + if itBodyType.NumIn() > 0
{ + if itBodyType.In(0).Implements(specContextType) { + hasContext = true + } else if itBodyType.In(0).Implements(contextType) && (len(entry.parameters) == 0 || !reflect.TypeOf(entry.parameters[0]).Implements(contextType)) { + hasContext = true } - invokeFunction(itBody, entry.parameters) - }) + } + + if err == nil { + err = validateParameters(itBody, entry.parameters, "Table Body function", entry.codeLocation, hasContext) + } + + if hasContext { + itNodeArgs = append(itNodeArgs, func(c SpecContext) { + if err != nil { + panic(err) + } + invokeFunction(itBody, append([]interface{}{c}, entry.parameters...)) + }) + } else { + itNodeArgs = append(itNodeArgs, func() { + if err != nil { + panic(err) + } + invokeFunction(itBody, entry.parameters) + }) + } pushNode(internal.NewNode(deprecationTracker, types.NodeTypeIt, description, itNodeArgs...)) } @@ -221,9 +255,14 @@ func invokeFunction(function interface{}, parameters []interface{}) []reflect.Va return reflect.ValueOf(function).Call(inValues) } -func validateParameters(function interface{}, parameters []interface{}, kind string, cl types.CodeLocation) error { +func validateParameters(function interface{}, parameters []interface{}, kind string, cl types.CodeLocation, hasContext bool) error { funcType := reflect.TypeOf(function) limit := funcType.NumIn() + offset := 0 + if hasContext { + limit = limit - 1 + offset = 1 + } if funcType.IsVariadic() { limit = limit - 1 } @@ -236,13 +275,13 @@ func validateParameters(function interface{}, parameters []interface{}, kind str var i = 0 for ; i < limit; i++ { actual := reflect.TypeOf(parameters[i]) - expected := funcType.In(i) + expected := funcType.In(i + offset) if !(actual == nil) && !actual.AssignableTo(expected) { return types.GinkgoErrors.IncorrectParameterTypeToTableFunction(i+1, expected, actual, kind, cl) } } if funcType.IsVariadic() { - expected := funcType.In(limit).Elem() + expected := funcType.In(limit + offset).Elem() for ; i < len(parameters); i++ { actual := reflect.TypeOf(parameters[i]) if !(actual == nil) && !actual.AssignableTo(expected) { diff --git a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go index 00107d3ad..e4e9e38c6 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go @@ -1,5 +1,4 @@ package types - import ( "fmt" "os" @@ -65,7 +64,7 @@ func NewCodeLocationWithStackTrace(skip int) CodeLocation { func PruneStack(fullStackTrace string, skip int) string { stack := strings.Split(fullStackTrace, "\n") // Ensure that the even entries are the method names and the - // the odd entries the source code information. + // odd entries the source code information. if len(stack) > 0 && strings.HasPrefix(stack[0], "goroutine ") { // Ignore "goroutine 29 [running]:" line. 
stack = stack[1:] diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index 8ebd329e8..f016c5c1f 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -28,8 +28,12 @@ type SuiteConfig struct { FlakeAttempts int EmitSpecProgress bool DryRun bool + PollProgressAfter time.Duration + PollProgressInterval time.Duration Timeout time.Duration OutputInterceptorMode string + SourceRoots []string + GracePeriod time.Duration ParallelProcess int ParallelTotal int @@ -42,6 +46,7 @@ func NewDefaultSuiteConfig() SuiteConfig { Timeout: time.Hour, ParallelProcess: 1, ParallelTotal: 1, + GracePeriod: 30 * time.Second, } } @@ -168,7 +173,7 @@ func (g CLIConfig) ComputedNumCompilers() int { } // Configuration for the Ginkgo CLI capturing available go flags -// A subset of Go flags are exposed by Ginkgo. Some are avaiable at compile time (e.g. ginkgo build) and others only at run time (e.g. ginkgo run - which has both build and run time flags). +// A subset of Go flags are exposed by Ginkgo. Some are available at compile time (e.g. ginkgo build) and others only at run time (e.g. ginkgo run - which has both build and run time flags). // More details can be found at: // https://docs.google.com/spreadsheets/d/1zkp-DS4hU4sAJl5eHh1UmgwxCPQhf3s5a8fbiOI8tJU/ type GoFlagsConfig struct { @@ -272,13 +277,21 @@ var SuiteConfigFlags = GinkgoFlags{ Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v."}, {KeyPath: "S.EmitSpecProgress", Name: "progress", SectionKey: "debug", Usage: "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter."}, + {KeyPath: "S.PollProgressAfter", Name: "poll-progress-after", SectionKey: "debug", UsageDefaultValue: "0", + Usage: "Emit node progress reports periodically if node hasn't completed after this duration."}, + {KeyPath: "S.PollProgressInterval", Name: "poll-progress-interval", SectionKey: "debug", UsageDefaultValue: "10s", + Usage: "The rate at which to emit node progress reports after poll-progress-after has elapsed."}, + {KeyPath: "S.SourceRoots", Name: "source-root", SectionKey: "debug", + Usage: "The location to look for source code when generating progress reports. You can pass multiple --source-root flags."}, {KeyPath: "S.Timeout", Name: "timeout", SectionKey: "debug", UsageDefaultValue: "1h", Usage: "Test suite fails if it does not complete within the specified timeout."}, + {KeyPath: "S.GracePeriod", Name: "grace-period", SectionKey: "debug", UsageDefaultValue: "30s", + Usage: "When interrupted, Ginkgo will wait for GracePeriod for the current running node to exit before moving on to the next one."}, {KeyPath: "S.OutputInterceptorMode", Name: "output-interceptor-mode", SectionKey: "debug", UsageArgument: "dup, swap, or none", Usage: "If set, ginkgo will use the specified output interception strategy when running in parallel. Defaults to dup on unix and swap on windows."}, {KeyPath: "S.LabelFilter", Name: "label-filter", SectionKey: "filter", UsageArgument: "expression", - Usage: "If set, ginkgo will only run specs with labels that match the label-filter. The passed-in expression can include boolean operations (!, &&, ||, ','), groupings via '()', and regular expresions '/regexp/'. e.g. '(cat || dog) && !fruit'"}, + Usage: "If set, ginkgo will only run specs with labels that match the label-filter. 
The passed-in expression can include boolean operations (!, &&, ||, ','), groupings via '()', and regular expressions '/regexp/'. e.g. '(cat || dog) && !fruit'"}, {KeyPath: "S.FocusStrings", Name: "focus", SectionKey: "filter", Usage: "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed."}, {KeyPath: "S.SkipStrings", Name: "skip", SectionKey: "filter", @@ -381,6 +394,10 @@ func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig Re errors = append(errors, GinkgoErrors.DryRunInParallelConfiguration()) } + if suiteConfig.GracePeriod <= 0 { + errors = append(errors, GinkgoErrors.GracePeriodCannotBeZero()) + } + if len(suiteConfig.FocusFiles) > 0 { _, err := ParseFileFilters(suiteConfig.FocusFiles) if err != nil { @@ -555,7 +572,7 @@ var GoRunFlags = GinkgoFlags{ } // VetAndInitializeCLIAndGoConfig validates that the Ginkgo CLI's configuration is sound -// It returns a potentially mutated copy of the config that rationalizes the configuraiton to ensure consistency for downstream consumers +// It returns a potentially mutated copy of the config that rationalizes the configuration to ensure consistency for downstream consumers func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsConfig) (CLIConfig, GoFlagsConfig, []error) { errors := []error{} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go index 79ca4593d..2948dfa0c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go @@ -4,6 +4,7 @@ import ( "os" "strconv" "strings" + "sync" "unicode" "github.com/onsi/ginkgo/v2/formatter" @@ -84,11 +85,13 @@ func (d deprecations) Nodot() Deprecation { type DeprecationTracker struct { deprecations map[Deprecation][]CodeLocation + lock *sync.Mutex } func NewDeprecationTracker() *DeprecationTracker { return &DeprecationTracker{ deprecations: map[Deprecation][]CodeLocation{}, + lock: &sync.Mutex{}, } } @@ -102,6 +105,8 @@ func (d *DeprecationTracker) TrackDeprecation(deprecation Deprecation, cl ...Cod } } + d.lock.Lock() + defer d.lock.Unlock() if len(cl) == 1 { d.deprecations[deprecation] = append(d.deprecations[deprecation], cl[0]) } else { @@ -110,10 +115,14 @@ func (d *DeprecationTracker) TrackDeprecation(deprecation Deprecation, cl ...Cod } func (d *DeprecationTracker) DidTrackDeprecations() bool { + d.lock.Lock() + defer d.lock.Unlock() return len(d.deprecations) > 0 } func (d *DeprecationTracker) DeprecationsReport() string { + d.lock.Lock() + defer d.lock.Unlock() out := formatter.F("{{light-yellow}}You're using deprecated Ginkgo functionality:{{/}}\n") out += formatter.F("{{light-yellow}}============================================={{/}}\n") for deprecation, locations := range d.deprecations { diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go index 6873f74db..b7ed5a21e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/errors.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -80,7 +80,7 @@ func (g ginkgoErrors) PushingNodeInRunPhase(nodeType NodeType, cl CodeLocation) to the Ginkgo spec tree in a leaf node {{bold}}after{{/}} the specs started running. To enable randomization and parallelization Ginkgo requires the spec tree -to be fully construted up front. In practice, this means that you can +to be fully constructed up front. 
In practice, this means that you can only create nodes like {{bold}}[%s]{{/}} at the top-level or within the body of a {{bold}}Describe{{/}}, {{bold}}Context{{/}}, or {{bold}}When{{/}}.`, nodeType, nodeType), CodeLocation: cl, @@ -180,29 +180,73 @@ func (g ginkgoErrors) InvalidDeclarationOfFocusedAndPending(cl CodeLocation, nod } } +func (g ginkgoErrors) InvalidDeclarationOfFlakeAttemptsAndMustPassRepeatedly(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Combination of Decorators: FlakeAttempts and MustPassRepeatedly", + Message: formatter.F(`[%s] node was decorated with both FlakeAttempts and MustPassRepeatedly. At most one is allowed.`, nodeType), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + func (g ginkgoErrors) UnknownDecorator(cl CodeLocation, nodeType NodeType, decorator interface{}) error { return GinkgoError{ - Heading: "Unkown Decorator", - Message: formatter.F(`[%s] node was passed an unkown decorator: '%#v'`, nodeType, decorator), + Heading: "Unknown Decorator", + Message: formatter.F(`[%s] node was passed an unknown decorator: '%#v'`, nodeType, decorator), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidBodyTypeForContainer(t reflect.Type, cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[%s] node must be passed {{bold}}func(){{/}} - i.e. functions that take nothing and return nothing. You passed {{bold}}%s{{/}} instead.`, nodeType, t), CodeLocation: cl, DocLink: "node-decorators-overview", } } func (g ginkgoErrors) InvalidBodyType(t reflect.Type, cl CodeLocation, nodeType NodeType) error { + mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}" + if nodeType.Is(NodeTypeContainer) { + mustGet = "{{bold}}func(){{/}}" + } return GinkgoError{ Heading: "Invalid Function", - Message: formatter.F(`[%s] node must be passed {{bold}}func(){{/}} - i.e. functions that take nothing and return nothing. + Message: formatter.F(`[%s] node must be passed `+mustGet+`. You passed {{bold}}%s{{/}} instead.`, nodeType, t), CodeLocation: cl, DocLink: "node-decorators-overview", } } +func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteProc1(t reflect.Type, cl CodeLocation) error { + mustGet := "{{bold}}func() []byte{{/}}, {{bold}}func(ctx SpecContext) []byte{{/}}, or {{bold}}func(ctx context.Context) []byte{{/}}, {{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}" + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its first function. +You passed {{bold}}%s{{/}} instead.`, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + +func (g ginkgoErrors) InvalidBodyTypeForSynchronizedBeforeSuiteAllProcs(t reflect.Type, cl CodeLocation) error { + mustGet := "{{bold}}func(){{/}}, {{bold}}func(ctx SpecContext){{/}}, or {{bold}}func(ctx context.Context){{/}}, {{bold}}func([]byte){{/}}, {{bold}}func(ctx SpecContext, []byte){{/}}, or {{bold}}func(ctx context.Context, []byte){{/}}" + return GinkgoError{ + Heading: "Invalid Function", + Message: formatter.F(`[SynchronizedBeforeSuite] node must be passed `+mustGet+` for its second function. 
+You passed {{bold}}%s{{/}} instead.`, t), + CodeLocation: cl, + DocLink: "node-decorators-overview", + } +} + func (g ginkgoErrors) MultipleBodyFunctions(cl CodeLocation, nodeType NodeType) error { return GinkgoError{ Heading: "Multiple Functions", - Message: formatter.F(`[%s] node must be passed a single {{bold}}func(){{/}} - but more than one was passed in.`, nodeType), + Message: formatter.F(`[%s] node must be passed a single function - but more than one was passed in.`, nodeType), CodeLocation: cl, DocLink: "node-decorators-overview", } @@ -211,12 +255,30 @@ func (g ginkgoErrors) MultipleBodyFunctions(cl CodeLocation, nodeType NodeType) func (g ginkgoErrors) MissingBodyFunction(cl CodeLocation, nodeType NodeType) error { return GinkgoError{ Heading: "Missing Functions", - Message: formatter.F(`[%s] node must be passed a single {{bold}}func(){{/}} - but none was passed in.`, nodeType), + Message: formatter.F(`[%s] node must be passed a single function - but none was passed in.`, nodeType), CodeLocation: cl, DocLink: "node-decorators-overview", } } +func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextNode(cl CodeLocation, nodeType NodeType) error { + return GinkgoError{ + Heading: "Invalid NodeTimeout, SpecTimeout, or GracePeriod", + Message: formatter.F(`[%s] was passed NodeTimeout, SpecTimeout, or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. You must accept a context to enable timeouts and grace periods`, nodeType), + CodeLocation: cl, + DocLink: "spec-timeouts-and-interruptible-nodes", + } +} + +func (g ginkgoErrors) InvalidTimeoutOrGracePeriodForNonContextCleanupNode(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid NodeTimeout, SpecTimeout, or GracePeriod", + Message: formatter.F(`[DeferCleanup] was passed NodeTimeout or GracePeriod but does not have a callback that accepts a {{bold}}SpecContext{{/}} or {{bold}}context.Context{{/}}. You must accept a context to enable timeouts and grace periods`), + CodeLocation: cl, + DocLink: "spec-timeouts-and-interruptible-nodes", + } +} + /* Ordered Container errors */ func (g ginkgoErrors) InvalidSerialNodeInNonSerialOrderedContainer(cl CodeLocation, nodeType NodeType) error { return GinkgoError{ @@ -292,6 +354,16 @@ func (g ginkgoErrors) AddReportEntryNotDuringRunPhase(cl CodeLocation) error { } } +/* By errors */ +func (g ginkgoErrors) ByNotDuringRunPhase(cl CodeLocation) error { + return GinkgoError{ + Heading: "Ginkgo detected an issue with your spec structure", + Message: formatter.F(`It looks like you are calling {{bold}}By{{/}} outside of a running spec. 
Make sure you call {{bold}}By{{/}} inside a runnable node such as It or BeforeEach and not inside the body of a container such as Describe or Context.`), + CodeLocation: cl, + DocLink: "documenting-complex-specs-by", + } +} + /* FileFilter and SkipFilter errors */ func (g ginkgoErrors) InvalidFileFilter(filter string) error { return GinkgoError{ @@ -370,6 +442,24 @@ func (g ginkgoErrors) InvalidEntryDescription(cl CodeLocation) error { } } +func (g ginkgoErrors) MissingParametersForTableFunction(cl CodeLocation) error { + return GinkgoError{ + Heading: fmt.Sprintf("No parameters have been passed to the Table Function"), + Message: fmt.Sprintf("The Table Function expected at least 1 parameter"), + CodeLocation: cl, + DocLink: "table-specs", + } +} + +func (g ginkgoErrors) IncorrectParameterTypeForTable(i int, name string, cl CodeLocation) error { + return GinkgoError{ + Heading: "DescribeTable passed incorrect parameter type", + Message: fmt.Sprintf("Parameter #%d passed to DescribeTable is of incorrect type <%s>", i, name), + CodeLocation: cl, + DocLink: "table-specs", + } +} + func (g ginkgoErrors) TooFewParametersToTableFunction(expected, actual int, kind string, cl CodeLocation) error { return GinkgoError{ Heading: fmt.Sprintf("Too few parameters passed in to %s", kind), @@ -424,16 +514,16 @@ func (g ginkgoErrors) SynchronizedBeforeSuiteFailedOnProc1() error { func (g ginkgoErrors) SynchronizedBeforeSuiteDisappearedOnProc1() error { return GinkgoError{ - Heading: "Process #1 disappeard before SynchronizedBeforeSuite could report back", + Heading: "Process #1 disappeared before SynchronizedBeforeSuite could report back", Message: "Ginkgo parallel process #1 disappeared before the first SynchronizedBeforeSuite function completed. This suite will now abort.", } } /* Configuration errors */ -func (g ginkgoErrors) UnkownTypePassedToRunSpecs(value interface{}) error { +func (g ginkgoErrors) UnknownTypePassedToRunSpecs(value interface{}) error { return GinkgoError{ - Heading: "Unkown Type pased to RunSpecs", + Heading: "Unknown Type passed to RunSpecs", Message: fmt.Sprintf("RunSpecs() accepts labels, and configuration of type types.SuiteConfig and/or types.ReporterConfig.\n You passed in: %v", value), } } @@ -479,6 +569,13 @@ func (g ginkgoErrors) DryRunInParallelConfiguration() error { } } +func (g ginkgoErrors) GracePeriodCannotBeZero() error { + return GinkgoError{ + Heading: "Ginkgo requires a positive --grace-period.", + Message: "Please set --grace-period to a positive duration. The default is 30s.", + } +} + func (g ginkgoErrors) ConflictingVerbosityConfiguration() error { return GinkgoError{ Heading: "Conflicting reporter verbosity settings.", @@ -513,3 +610,12 @@ func (g ginkgoErrors) BothRepeatAndUntilItFails() error { Message: "--until-it-fails directs Ginkgo to rerun specs indefinitely until they fail. --repeat directs Ginkgo to rerun specs a set number of times. You can't set both... 
which would you like?", } } + +/* Stack-Trace parsing errors */ + +func (g ginkgoErrors) FailedToParseStackTrace(message string) error { + return GinkgoError{ + Heading: "Failed to Parse Stack Trace", + Message: message, + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go index 7d8a73fec..9186ae873 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/flags.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/flags.go @@ -121,7 +121,7 @@ func NewAttachedGinkgoFlagSet(flagSet *flag.FlagSet, flags GinkgoFlags, bindings func bindFlagSet(f GinkgoFlagSet, flagSet *flag.FlagSet) (GinkgoFlagSet, error) { if flagSet == nil { f.flagSet = flag.NewFlagSet("", flag.ContinueOnError) - //supress all output as Ginkgo is reponsible for formatting usage + //suppress all output as Ginkgo is responsible for formatting usage f.flagSet.SetOutput(io.Discard) } else { f.flagSet = flagSet diff --git a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go index a85281a4f..798bedc03 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go @@ -42,7 +42,7 @@ func (rev ReportEntryValue) String() string { } func (rev ReportEntryValue) MarshalJSON() ([]byte, error) { - //All this to capture the representaiton at encoding-time, not creating time + //All this to capture the representation at encoding-time, not creating time //This way users can Report on pointers and get their final values at reporting-time out := struct { AsJSON string @@ -50,7 +50,6 @@ func (rev ReportEntryValue) MarshalJSON() ([]byte, error) { }{ Representation: rev.String(), } - asJSON, err := json.Marshal(rev.raw) if err != nil { return nil, err @@ -98,7 +97,7 @@ type ReportEntry struct { Value ReportEntryValue } -// ColorableStringer is an interface that ReportEntry values can satisfy. If they do then ColorableStirng() is used to generate their representation. +// ColorableStringer is an interface that ReportEntry values can satisfy. If they do then ColorableString() is used to generate their representation. type ColorableStringer interface { ColorableString() string } @@ -121,6 +120,8 @@ func (entry ReportEntry) GetRawValue() interface{} { return entry.Value.GetRawValue() } + + type ReportEntries []ReportEntry func (re ReportEntries) HasVisibility(visibilities ...ReportEntryVisibility) bool { diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go index 42b5644cb..9fc4425fe 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/types.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go @@ -22,10 +22,10 @@ type Report struct { //SuiteSucceeded captures the success or failure status of the test run //If true, the test run is considered successful. 
- //If false, the test run is considered unsucccessful + //If false, the test run is considered unsuccessful SuiteSucceeded bool - //SuiteHasProgrammaticFocus captures whether the test suite has a test or set of tests that are programatically focused + //SuiteHasProgrammaticFocus captures whether the test suite has a test or set of tests that are programmatically focused //(i.e an `FIt` or an `FDescribe` SuiteHasProgrammaticFocus bool @@ -67,7 +67,7 @@ type PreRunStats struct { SpecsThatWillRun int } -//Add is ued by Ginkgo's parallel aggregation mechanisms to combine test run reports form individual parallel processes +//Add is used by Ginkgo's parallel aggregation mechanisms to combine test run reports form individual parallel processes //to form a complete final report. func (report Report) Add(other Report) Report { report.SuiteSucceeded = report.SuiteSucceeded && other.SuiteSucceeded @@ -151,10 +151,17 @@ type SpecReport struct { //It includes detailed information about the Failure Failure Failure - // NumAttempts captures the number of times this Spec was run. Flakey specs can be retried with - // ginkgo --flake-attempts=N + // NumAttempts captures the number of times this Spec was run. + // Flakey specs can be retried with ginkgo --flake-attempts=N or the use of the FlakeAttempts decorator. + // Repeated specs can be retried with the use of the MustPassRepeatedly decorator NumAttempts int + // MaxFlakeAttempts captures whether the spec has been retried with ginkgo --flake-attempts=N or the use of the FlakeAttempts decorator. + MaxFlakeAttempts int + + // MaxMustPassRepeatedly captures whether the spec has the MustPassRepeatedly decorator + MaxMustPassRepeatedly int + // CapturedGinkgoWriterOutput contains text printed to the GinkgoWriter CapturedGinkgoWriterOutput string @@ -165,6 +172,12 @@ type SpecReport struct { // ReportEntries contains any reports added via `AddReportEntry` ReportEntries ReportEntries + + // ProgressReports contains any progress reports generated during this spec. These can either be manually triggered, or automatically generated by Ginkgo via the PollProgressAfter() decorator + ProgressReports []ProgressReport + + // AdditionalFailures contains any failures that occurred after the initial spec failure. These typically occur in cleanup nodes after the initial failure and are only emitted when running in verbose mode. 
+ AdditionalFailures []AdditionalFailure } func (report SpecReport) MarshalJSON() ([]byte, error) { @@ -184,9 +197,13 @@ func (report SpecReport) MarshalJSON() ([]byte, error) { ParallelProcess int Failure *Failure `json:",omitempty"` NumAttempts int - CapturedGinkgoWriterOutput string `json:",omitempty"` - CapturedStdOutErr string `json:",omitempty"` - ReportEntries ReportEntries `json:",omitempty"` + MaxFlakeAttempts int + MaxMustPassRepeatedly int + CapturedGinkgoWriterOutput string `json:",omitempty"` + CapturedStdOutErr string `json:",omitempty"` + ReportEntries ReportEntries `json:",omitempty"` + ProgressReports []ProgressReport `json:",omitempty"` + AdditionalFailures []AdditionalFailure `json:",omitempty"` }{ ContainerHierarchyTexts: report.ContainerHierarchyTexts, ContainerHierarchyLocations: report.ContainerHierarchyLocations, @@ -203,6 +220,8 @@ func (report SpecReport) MarshalJSON() ([]byte, error) { Failure: nil, ReportEntries: nil, NumAttempts: report.NumAttempts, + MaxFlakeAttempts: report.MaxFlakeAttempts, + MaxMustPassRepeatedly: report.MaxMustPassRepeatedly, CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput, CapturedStdOutErr: report.CapturedStdOutErr, } @@ -213,6 +232,12 @@ func (report SpecReport) MarshalJSON() ([]byte, error) { if len(report.ReportEntries) > 0 { out.ReportEntries = report.ReportEntries } + if len(report.ProgressReports) > 0 { + out.ProgressReports = report.ProgressReports + } + if len(report.AdditionalFailures) > 0 { + out.AdditionalFailures = report.AdditionalFailures + } return json.Marshal(out) } @@ -349,11 +374,22 @@ func (reports SpecReports) CountWithState(states SpecState) int { return n } -//CountWithState returns the number of SpecReports that passed after multiple attempts +//If the Spec passes, CountOfFlakedSpecs returns the number of SpecReports that failed after multiple attempts. func (reports SpecReports) CountOfFlakedSpecs() int { n := 0 for i := range reports { - if reports[i].State.Is(SpecStatePassed) && reports[i].NumAttempts > 1 { + if reports[i].MaxFlakeAttempts > 1 && reports[i].State.Is(SpecStatePassed) && reports[i].NumAttempts > 1 { + n += 1 + } + } + return n +} + +//If the Spec fails, CountOfRepeatedSpecs returns the number of SpecReports that passed after multiple attempts +func (reports SpecReports) CountOfRepeatedSpecs() int { + n := 0 + for i := range reports { + if reports[i].MaxMustPassRepeatedly > 1 && reports[i].State.Is(SpecStateFailureStates) && reports[i].NumAttempts > 1 { n += 1 } } @@ -365,7 +401,7 @@ type Failure struct { // Message - the failure message passed into Fail(...). When using a matcher library // like Gomega, this will contain the failure message generated by Gomega. // - // Message is also populated if hte user has called Skip(...). + // Message is also populated if the user has called Skip(...). Message string // Location - the CodeLocation where the failure occurred @@ -376,10 +412,10 @@ type Failure struct { // then ForwardedPanic will be populated with a string representation of the captured panic. ForwardedPanic string `json:",omitempty"` - // FailureNodeContext - one of three contexts describing the node in which the failure occured: - // FailureNodeIsLeafNode means the failure occured in the leaf node of the associated SpecReport. None of the other FailureNode fields will be populated - // FailureNodeAtTopLevel means the failure occured in a non-leaf node that is defined at the top-level of the spec (i.e. not in a container). 
FailureNodeType and FailureNodeLocation will be populated. - // FailureNodeInContainer means the failure occured in a non-leaf node that is defined within a container. FailureNodeType, FailureNodeLocaiton, and FailureNodeContainerIndex will be populated. + // FailureNodeContext - one of three contexts describing the node in which the failure occurred: + // FailureNodeIsLeafNode means the failure occurred in the leaf node of the associated SpecReport. None of the other FailureNode fields will be populated + // FailureNodeAtTopLevel means the failure occurred in a non-leaf node that is defined at the top-level of the spec (i.e. not in a container). FailureNodeType and FailureNodeLocation will be populated. + // FailureNodeInContainer means the failure occurred in a non-leaf node that is defined within a container. FailureNodeType, FailureNodeLocation, and FailureNodeContainerIndex will be populated. // // FailureNodeType will contain the NodeType of the node in which the failure occurred. // FailureNodeLocation will contain the CodeLocation of the node in which the failure occurred. @@ -388,10 +424,13 @@ type Failure struct { FailureNodeType NodeType FailureNodeLocation CodeLocation FailureNodeContainerIndex int + + //ProgressReport is populated if the spec was interrupted or timed out + ProgressReport ProgressReport } func (f Failure) IsZero() bool { - return f == Failure{} + return f.Message == "" && (f.Location == CodeLocation{}) } // FailureNodeContext captures the location context for the node containing the failing line of code @@ -424,6 +463,14 @@ func (fnc FailureNodeContext) MarshalJSON() ([]byte, error) { return fncEnumSupport.MarshJSON(uint(fnc)) } +// AdditionalFailure captures any additional failures that occur after the initial failure of a spec +// these typically occur in cleanup nodes after the spec has failed. +// We can't simply use Failure as we want to track the SpecState to know what kind of failure this is +type AdditionalFailure struct { + State SpecState + Failure Failure +} + // SpecState captures the state of a spec // To determine if a given `state` represents a failure state, use `state.Is(SpecStateFailureStates)` type SpecState uint @@ -438,6 +485,7 @@ const ( SpecStateAborted SpecStatePanicked SpecStateInterrupted + SpecStateTimedout ) var ssEnumSupport = NewEnumSupport(map[uint]string{ @@ -449,6 +497,7 @@ var ssEnumSupport = NewEnumSupport(map[uint]string{ uint(SpecStateAborted): "aborted", uint(SpecStatePanicked): "panicked", uint(SpecStateInterrupted): "interrupted", + uint(SpecStateTimedout): "timedout", }) func (ss SpecState) String() string { @@ -463,12 +512,113 @@ func (ss SpecState) MarshalJSON() ([]byte, error) { return ssEnumSupport.MarshJSON(uint(ss)) } -var SpecStateFailureStates = SpecStateFailed | SpecStateAborted | SpecStatePanicked | SpecStateInterrupted +var SpecStateFailureStates = SpecStateFailed | SpecStateTimedout | SpecStateAborted | SpecStatePanicked | SpecStateInterrupted func (ss SpecState) Is(states SpecState) bool { return ss&states != 0 } +// ProgressReport captures the progress of the current spec. 
It is, effectively, a structured Ginkgo-aware stack trace +type ProgressReport struct { + Message string + ParallelProcess int + RunningInParallel bool + + Time time.Time + + ContainerHierarchyTexts []string + LeafNodeText string + LeafNodeLocation CodeLocation + SpecStartTime time.Time + + CurrentNodeType NodeType + CurrentNodeText string + CurrentNodeLocation CodeLocation + CurrentNodeStartTime time.Time + + CurrentStepText string + CurrentStepLocation CodeLocation + CurrentStepStartTime time.Time + + AdditionalReports []string + + CapturedGinkgoWriterOutput string `json:",omitempty"` + GinkgoWriterOffset int + + Goroutines []Goroutine +} + +func (pr ProgressReport) IsZero() bool { + return pr.CurrentNodeType == NodeTypeInvalid +} + +func (pr ProgressReport) SpecGoroutine() Goroutine { + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine { + return goroutine + } + } + return Goroutine{} +} + +func (pr ProgressReport) HighlightedGoroutines() []Goroutine { + out := []Goroutine{} + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine || !goroutine.HasHighlights() { + continue + } + out = append(out, goroutine) + } + return out +} + +func (pr ProgressReport) OtherGoroutines() []Goroutine { + out := []Goroutine{} + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine || goroutine.HasHighlights() { + continue + } + out = append(out, goroutine) + } + return out +} + +func (pr ProgressReport) WithoutCapturedGinkgoWriterOutput() ProgressReport { + out := pr + out.CapturedGinkgoWriterOutput = "" + return out +} + +type Goroutine struct { + ID uint64 + State string + Stack []FunctionCall + IsSpecGoroutine bool +} + +func (g Goroutine) IsZero() bool { + return g.ID == 0 +} + +func (g Goroutine) HasHighlights() bool { + for _, fc := range g.Stack { + if fc.Highlight { + return true + } + } + + return false +} + +type FunctionCall struct { + Function string + Filename string + Line int + Highlight bool `json:",omitempty"` + Source []string `json:",omitempty"` + SourceHighlight int `json:",omitempty"` +} + // NodeType captures the type of a given Ginkgo Node type NodeType uint @@ -503,6 +653,8 @@ const ( var NodeTypesForContainerAndIt = NodeTypeContainer | NodeTypeIt var NodeTypesForSuiteLevelNodes = NodeTypeBeforeSuite | NodeTypeSynchronizedBeforeSuite | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeReportAfterSuite | NodeTypeCleanupAfterSuite +var NodeTypesAllowedDuringCleanupInterrupt = NodeTypeAfterEach | NodeTypeJustAfterEach | NodeTypeAfterAll | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeCleanupAfterEach | NodeTypeCleanupAfterAll | NodeTypeCleanupAfterSuite +var NodeTypesAllowedDuringReportInterrupt = NodeTypeReportBeforeEach | NodeTypeReportAfterEach | NodeTypeReportAfterSuite var ntEnumSupport = NewEnumSupport(map[uint]string{ uint(NodeTypeInvalid): "INVALID NODE TYPE", @@ -521,8 +673,8 @@ var ntEnumSupport = NewEnumSupport(map[uint]string{ uint(NodeTypeReportBeforeEach): "ReportBeforeEach", uint(NodeTypeReportAfterEach): "ReportAfterEach", uint(NodeTypeReportAfterSuite): "ReportAfterSuite", - uint(NodeTypeCleanupInvalid): "INVALID CLEANUP NODE", - uint(NodeTypeCleanupAfterEach): "DeferCleanup", + uint(NodeTypeCleanupInvalid): "DeferCleanup", + uint(NodeTypeCleanupAfterEach): "DeferCleanup (Each)", uint(NodeTypeCleanupAfterAll): "DeferCleanup (All)", uint(NodeTypeCleanupAfterSuite): "DeferCleanup (Suite)", }) diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go 
b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 00781a440..7ba384a09 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.0.0" +const VERSION = "2.4.0" diff --git a/vendor/github.com/onsi/gomega/.gitignore b/vendor/github.com/onsi/gomega/.gitignore index 720c13cba..5f12ff053 100644 --- a/vendor/github.com/onsi/gomega/.gitignore +++ b/vendor/github.com/onsi/gomega/.gitignore @@ -3,3 +3,4 @@ . .idea gomega.iml +TODO.md \ No newline at end of file diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index e3b437985..e088dc078 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,136 @@ +## 1.23.0 + +### Features +- Custom formatting on a per-type basis can be provided using `format.RegisterCustomFormatter()` -- see the docs [here](https://onsi.github.io/gomega/#adjusting-output) + +- Substantial improvements have been made to `StopTrying()`: + - Users can now use `StopTrying().Wrap(err)` to wrap errors and `StopTrying().Attach(description, object)` to attach arbitrary objects to the `StopTrying()` error + - `StopTrying()` is now always interpreted as a failure. If you are an early adopter of `StopTrying()` you may need to change your code as the prior version would match against the returned value even if `StopTrying()` was returned. Going forward the `StopTrying()` api should remain stable. + - `StopTrying()` and `StopTrying().Now()` can both be used in matchers - not just polled functions. + +- `TryAgainAfter(duration)` is used like `StopTrying()` but instructs `Eventually` and `Consistently` that the poll should be tried again after the specified duration. This allows you to dynamically adjust the polling duration. + +- `ctx` can now be passed-in as the first argument to `Eventually` and `Consistently`. + +## Maintenance + +- Bump github.com/onsi/ginkgo/v2 from 2.3.0 to 2.3.1 (#597) [afed901] +- Bump nokogiri from 1.13.8 to 1.13.9 in /docs (#599) [7c691b3] +- Bump github.com/google/go-cmp from 0.5.8 to 0.5.9 (#587) [ff22665] + +## 1.22.1 + +## Fixes +- When passed a context and no explicit timeout, Eventually will only timeout when the context is cancelled [e5105cf] +- Allow StopTrying() to be wrapped [bf3cba9] + +## Maintenance +- bump to ginkgo v2.3.0 [c5d5c39] + +## 1.22.0 + +### Features + +Several improvements have been made to `Eventually` and `Consistently` in this and the most recent releases: + +- Eventually and Consistently can take a context.Context [65c01bc] + This enables integration with Ginkgo 2.3.0's interruptible nodes and node timeouts. 
+- Eventually and Consistently that are passed a SpecContext can provide reports when an interrupt occurs [0d063c9] +- Eventually/Consistently will forward an attached context to functions that ask for one [e2091c5] +- Eventually/Consistently supports passing arguments to functions via WithArguments() [a2dc7c3] +- Eventually and Consistently can now be stopped early with StopTrying(message) and StopTrying(message).Now() [52976bb] + +These improvements are all documented in [Gomega's docs](https://onsi.github.io/gomega/#making-asynchronous-assertions) + +## Fixes + +## Maintenance + +## 1.21.1 + +### Features +- Eventually and Consistently that are passed a SpecContext can provide reports when an interrupt occurs [0d063c9] + +## 1.21.0 + +### Features +- Eventually and Consistently can take a context.Context [65c01bc] + This enables integration with Ginkgo 2.3.0's interruptible nodes and node timeouts. +- Introduces Eventually.Within.ProbeEvery with tests and documentation (#591) [f633800] +- New BeKeyOf matcher with documentation and unit tests (#590) [fb586b3] + +## Fixes +- Cover the entire gmeasure suite with leak detection [8c54344] +- Fix gmeasure leak [119d4ce] +- Ignore new Ginkgo ProgressSignal goroutine in gleak [ba548e2] + +## Maintenance + +- Fixes crashes on newer Ruby 3 installations by upgrading github-pages gem dependency (#596) [12469a0] + + +## 1.20.2 + +## Fixes +- label specs that rely on remote access; bump timeout on short-circuit test to make it less flaky [35eeadf] +- gexec: allow more headroom for SIGABRT-related unit tests (#581) [5b78f40] +- Enable reading from a closed gbytes.Buffer (#575) [061fd26] + +## Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.1.5 to 2.1.6 (#583) [55d895b] +- Bump github.com/onsi/ginkgo/v2 from 2.1.4 to 2.1.5 (#582) [346de7c] + +## 1.20.1 + +## Fixes +- fix false positive gleaks when using ginkgo -p (#577) [cb46517] +- Fix typos in gomega_dsl.go (#569) [5f71ed2] +- don't panic on Eventually(nil), fixing #555 (#567) [9d1186f] +- vet optional description args in assertions, fixing #560 (#566) [8e37808] + +## Maintenance +- test: add new Go 1.19 to test matrix (#571) [40d7efe] +- Bump tzinfo from 1.2.9 to 1.2.10 in /docs (#564) [5f26371] + +## 1.20.0 + +## Features +- New [`gleak`](https://onsi.github.io/gomega/#codegleakcode-finding-leaked-goroutines) experimental goroutine leak detection package! (#538) [85ba7bc] +- New `BeComparableTo` matcher(#546) that uses `gocmp` to make comparisons [e77ea75] +- New `HaveExistingField` matcher (#553) [fd130e1] +- Document how to wrap Gomega (#539) [56714a4] + +## Fixes +- Support pointer receivers in HaveField; fixes #543 (#544) [8dab36e] + +## Maintenance +- Bump various dependencies: + - Upgrade to yaml.v3 (#556) [f5a83b1] + - Bump github/codeql-action from 1 to 2 (#549) [52f5adf] + - Bump github.com/google/go-cmp from 0.5.7 to 0.5.8 (#551) [5f3942d] + - Bump nokogiri from 1.13.4 to 1.13.6 in /docs (#554) [eb4b4c2] + - Use latest ginkgo (#535) [1c29028] + - Bump nokogiri from 1.13.3 to 1.13.4 in /docs (#541) [1ce84d5] + - Bump actions/setup-go from 2 to 3 (#540) [755485e] + - Bump nokogiri from 1.12.5 to 1.13.3 in /docs (#522) [4fbb0dc] + - Bump actions/checkout from 2 to 3 (#526) [ac49202] + +## 1.19.0 + +## Features +- New [`HaveEach`](https://onsi.github.io/gomega/#haveeachelement-interface) matcher to ensure that each and every element in an `array`, `slice`, or `map` satisfies the passed in matcher. 
(#523) [9fc2ae2] (#524) [c8ba582] +- Users can now wrap the `Gomega` interface to implement custom behavior on each assertion. (#521) [1f2e714] +- [`ContainElement`](https://onsi.github.io/gomega/#containelementelement-interface) now accepts an additional pointer argument. Elements that satisfy the matcher are stored in the pointer enabling developers to easily add subsequent, more detailed, assertions against the matching element. (#527) [1a4e27f] + +## Fixes +- update RELEASING instructions to match ginkgo [0917cde] +- Bump github.com/onsi/ginkgo/v2 from 2.0.0 to 2.1.3 (#519) [49ab4b0] +- Fix CVE-2021-38561 (#534) [f1b4456] +- Fix max number of samples in experiments on non-64-bit systems. (#528) [1c84497] +- Remove dependency on ginkgo v1.16.4 (#530) [4dea8d5] +- Fix for Go 1.18 (#532) [56d2a29] +- Document precedence of timeouts (#533) [b607941] + ## 1.18.1 ## Fixes diff --git a/vendor/github.com/onsi/gomega/RELEASING.md b/vendor/github.com/onsi/gomega/RELEASING.md index 998d64ee7..7153b9b94 100644 --- a/vendor/github.com/onsi/gomega/RELEASING.md +++ b/vendor/github.com/onsi/gomega/RELEASING.md @@ -1,12 +1,23 @@ A Gomega release is a tagged sha and a GitHub release. To cut a release: 1. Ensure CHANGELOG.md is up to date. - - Use `git log --pretty=format:'- %s [%h]' HEAD...vX.X.X` to list all the commits since the last release + - Use + ```bash + LAST_VERSION=$(git tag --sort=version:refname | tail -n1) + CHANGES=$(git log --pretty=format:'- %s [%h]' HEAD...$LAST_VERSION) + echo -e "## NEXT\n\n$CHANGES\n\n### Features\n\n## Fixes\n\n## Maintenance\n\n$(cat CHANGELOG.md)" > CHANGELOG.md + ``` + to update the changelog - Categorize the changes into - Breaking Changes (requires a major version) - New Features (minor version) - Fixes (fix version) - Maintenance (which in general should not be mentioned in `CHANGELOG.md` as they have no user impact) -2. Update GOMEGA_VERSION in `gomega_dsl.go` -3. Push a commit with the version number as the commit message (e.g. `v1.3.0`) -4. Create a new [GitHub release](https://help.github.com/articles/creating-releases/) with the version number as the tag (e.g. `v1.3.0`). List the key changes in the release notes. +1. Update GOMEGA_VERSION in `gomega_dsl.go` +1. Commit, push, and release: + ``` + git commit -m "vM.m.p" + git push + gh release create "vM.m.p" + git fetch --tags origin master + ``` \ No newline at end of file diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go index 6e78c391d..1a2ed877a 100644 --- a/vendor/github.com/onsi/gomega/format/format.go +++ b/vendor/github.com/onsi/gomega/format/format.go @@ -65,6 +65,52 @@ type GomegaStringer interface { GomegaString() string } +/* +CustomFormatters can be registered with Gomega via RegisterCustomFormatter() +Any value to be rendered by Gomega is passed to each registered CustomFormatter. 
+The CustomFormatter signals that it will handle formatting the value by returning (formatted-string, true) +If the CustomFormatter does not want to handle the object it should return ("", false) + +Strings returned by CustomFormatters are not truncated +*/ +type CustomFormatter func(value interface{}) (string, bool) +type CustomFormatterKey uint + +var customFormatterKey CustomFormatterKey = 1 + +type customFormatterKeyPair struct { + CustomFormatter + CustomFormatterKey +} + +/* +RegisterCustomFormatter registers a CustomFormatter and returns a CustomFormatterKey + +You can call UnregisterCustomFormatter with the returned key to unregister the associated CustomFormatter +*/ +func RegisterCustomFormatter(customFormatter CustomFormatter) CustomFormatterKey { + key := customFormatterKey + customFormatterKey += 1 + customFormatters = append(customFormatters, customFormatterKeyPair{customFormatter, key}) + return key +} + +/* +UnregisterCustomFormatter unregisters a previously registered CustomFormatter. You should pass in the key returned by RegisterCustomFormatter +*/ +func UnregisterCustomFormatter(key CustomFormatterKey) { + formatters := []customFormatterKeyPair{} + for _, f := range customFormatters { + if f.CustomFormatterKey == key { + continue + } + formatters = append(formatters, f) + } + customFormatters = formatters +} + +var customFormatters = []customFormatterKeyPair{} + /* Generates a formatted matcher success/failure message of the form: @@ -219,17 +265,24 @@ func Object(object interface{}, indentation uint) string { IndentString takes a string and indents each line by the specified amount. */ func IndentString(s string, indentation uint) string { + return indentString(s, indentation, true) +} + +func indentString(s string, indentation uint, indentFirstLine bool) string { + result := &strings.Builder{} components := strings.Split(s, "\n") - result := "" indent := strings.Repeat(Indent, int(indentation)) for i, component := range components { - result += indent + component + if i > 0 || indentFirstLine { + result.WriteString(indent) + } + result.WriteString(component) if i < len(components)-1 { - result += "\n" + result.WriteString("\n") } } - return result + return result.String() } func formatType(v reflect.Value) string { @@ -261,18 +314,27 @@ func formatValue(value reflect.Value, indentation uint) string { if value.CanInterface() { obj := value.Interface() + // if a CustomFormatter handles this value, we'll go with that + for _, customFormatter := range customFormatters { + formatted, handled := customFormatter.CustomFormatter(obj) + // do not truncate a user-provided CustomFormatter() + if handled { + return indentString(formatted, indentation+1, false) + } + } + // GomegaStringer will take precedence over other representations and disregards UseStringerRepresentation if x, ok := obj.(GomegaStringer); ok { - // do not truncate a user-defined GoMegaString() value - return x.GomegaString() + // do not truncate a user-defined GomegaString() value + return indentString(x.GomegaString(), indentation+1, false) } if UseStringerRepresentation { switch x := obj.(type) { case fmt.GoStringer: - return truncateLongStrings(x.GoString()) + return indentString(truncateLongStrings(x.GoString()), indentation+1, false) case fmt.Stringer: - return truncateLongStrings(x.String()) + return indentString(truncateLongStrings(x.String()), indentation+1, false) } } } diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 6936e2411..e236a40f4 
100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.18.1" +const GOMEGA_VERSION = "1.23.0" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). @@ -34,7 +34,7 @@ Depending on your vendoring solution you may be inadvertently importing gomega a // to abstract between the standard package-level function implementations // and alternatives like *WithT. // -// The types in the top-level DSL have gotten a bit messy due to earlier depracations that avoid stuttering +// The types in the top-level DSL have gotten a bit messy due to earlier deprecations that avoid stuttering // and due to an accidental use of a concrete type (*WithT) in an earlier release. // // As of 1.15 both the WithT and Ginkgo variants of Gomega are implemented by the same underlying object @@ -52,7 +52,7 @@ var Default = Gomega(internal.NewGomega(internal.FetchDefaultDurationBundle())) // rich ecosystem of matchers without causing a test to fail. For example, to aggregate a series of potential failures // or for use in a non-test setting. func NewGomega(fail types.GomegaFailHandler) Gomega { - return internal.NewGomega(Default.(*internal.Gomega).DurationBundle).ConfigureWithFailHandler(fail) + return internal.NewGomega(internalGomega(Default).DurationBundle).ConfigureWithFailHandler(fail) } // WithT wraps a *testing.T and provides `Expect`, `Eventually`, and `Consistently` methods. This allows you to leverage @@ -69,17 +69,31 @@ type WithT = internal.Gomega // GomegaWithT is deprecated in favor of gomega.WithT, which does not stutter. type GomegaWithT = WithT -// NewWithT takes a *testing.T and returngs a `gomega.WithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with +// inner is an interface that allows users to provide a wrapper around Default. The wrapper +// must implement the inner interface and return either the original Default or the result of +// a call to NewGomega(). +type inner interface { + Inner() Gomega +} + +func internalGomega(g Gomega) *internal.Gomega { + if v, ok := g.(inner); ok { + return v.Inner().(*internal.Gomega) + } + return g.(*internal.Gomega) +} + +// NewWithT takes a *testing.T and returns a `gomega.WithT` allowing you to use `Expect`, `Eventually`, and `Consistently` along with // Gomega's rich ecosystem of matchers in standard `testing` test suits. // -// func TestFarmHasCow(t *testing.T) { -// g := gomega.NewWithT(t) +// func TestFarmHasCow(t *testing.T) { +// g := gomega.NewWithT(t) // -// f := farm.New([]string{"Cow", "Horse"}) -// g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow") -// } +// f := farm.New([]string{"Cow", "Horse"}) +// g.Expect(f.HasCow()).To(BeTrue(), "Farm should have cow") +// } func NewWithT(t types.GomegaTestingT) *WithT { - return internal.NewGomega(Default.(*internal.Gomega).DurationBundle).ConfigureWithT(t) + return internal.NewGomega(internalGomega(Default).DurationBundle).ConfigureWithT(t) } // NewGomegaWithT is deprecated in favor of gomega.NewWithT, which does not stutter. @@ -88,37 +102,37 @@ var NewGomegaWithT = NewWithT // RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails // the fail handler passed into RegisterFailHandler is called. 
func RegisterFailHandler(fail types.GomegaFailHandler) { - Default.(*internal.Gomega).ConfigureWithFailHandler(fail) + internalGomega(Default).ConfigureWithFailHandler(fail) } // RegisterFailHandlerWithT is deprecated and will be removed in a future release. // users should use RegisterFailHandler, or RegisterTestingT func RegisterFailHandlerWithT(_ types.GomegaTestingT, fail types.GomegaFailHandler) { fmt.Println("RegisterFailHandlerWithT is deprecated. Please use RegisterFailHandler or RegisterTestingT instead.") - Default.(*internal.Gomega).ConfigureWithFailHandler(fail) + internalGomega(Default).ConfigureWithFailHandler(fail) } // RegisterTestingT connects Gomega to Golang's XUnit style // Testing.T tests. It is now deprecated and you should use NewWithT() instead to get a fresh instance of Gomega for each test. func RegisterTestingT(t types.GomegaTestingT) { - Default.(*internal.Gomega).ConfigureWithT(t) + internalGomega(Default).ConfigureWithT(t) } // InterceptGomegaFailures runs a given callback and returns an array of // failure messages generated by any Gomega assertions within the callback. -// Exeuction continues after the first failure allowing users to collect all failures +// Execution continues after the first failure allowing users to collect all failures // in the callback. // // This is most useful when testing custom matchers, but can also be used to check // on a value using a Gomega assertion without causing a test failure. func InterceptGomegaFailures(f func()) []string { - originalHandler := Default.(*internal.Gomega).Fail + originalHandler := internalGomega(Default).Fail failures := []string{} - Default.(*internal.Gomega).Fail = func(message string, callerSkip ...int) { + internalGomega(Default).Fail = func(message string, callerSkip ...int) { failures = append(failures, message) } defer func() { - Default.(*internal.Gomega).Fail = originalHandler + internalGomega(Default).Fail = originalHandler }() f() return failures @@ -131,14 +145,14 @@ func InterceptGomegaFailures(f func()) []string { // does not register a failure with the FailHandler registered via RegisterFailHandler - it is up // to the user to decide what to do with the returned error func InterceptGomegaFailure(f func()) (err error) { - originalHandler := Default.(*internal.Gomega).Fail - Default.(*internal.Gomega).Fail = func(message string, callerSkip ...int) { + originalHandler := internalGomega(Default).Fail + internalGomega(Default).Fail = func(message string, callerSkip ...int) { err = errors.New(message) panic("stop execution") } defer func() { - Default.(*internal.Gomega).Fail = originalHandler + internalGomega(Default).Fail = originalHandler if e := recover(); e != nil { if err == nil { panic(e) @@ -151,13 +165,14 @@ func InterceptGomegaFailure(f func()) (err error) { } func ensureDefaultGomegaIsConfigured() { - if !Default.(*internal.Gomega).IsConfigured() { + if !internalGomega(Default).IsConfigured() { panic(nilGomegaPanic) } } // Ω wraps an actual value allowing assertions to be made on it: -// Ω("foo").Should(Equal("foo")) +// +// Ω("foo").Should(Equal("foo")) // // If Ω is passed more than one argument it will pass the *first* argument to the matcher. // All subsequent arguments will be required to be nil/zero. @@ -166,10 +181,13 @@ func ensureDefaultGomegaIsConfigured() { // a value and an error - a common patter in Go. 
// // For example, given a function with signature: -// func MyAmazingThing() (int, error) +// +// func MyAmazingThing() (int, error) // // Then: -// Expect(MyAmazingThing()).Should(Equal(3)) +// +// Expect(MyAmazingThing()).Should(Equal(3)) +// Will succeed only if `MyAmazingThing()` returns `(3, nil)` // // Expect and Ω are identical @@ -201,7 +223,8 @@ func Expect(actual interface{}, extra ...interface{}) Assertion { } // ExpectWithOffset wraps an actual value allowing assertions to be made on it: -// ExpectWithOffset(1, "foo").To(Equal("foo")) +// +// ExpectWithOffset(1, "foo").To(Equal("foo")) // // Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument // that is used to modify the call-stack offset when computing line numbers. It is @@ -219,7 +242,7 @@ func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Asse Eventually enables making assertions on asynchronous behavior. Eventually checks that an assertion *eventually* passes. Eventually blocks when called and attempts an assertion periodically until it passes or a timeout occurs. Both the timeout and polling interval are configurable as optional arguments. -The first optional argument is the timeout (which defaults to 1s), the second is the polling interval (which defaults to 10ms). Both intervals can be specified as time.Duration, parsable duration strings or floats/integers (in which case they are interpreted as seconds). +The first optional argument is the timeout (which defaults to 1s), the second is the polling interval (which defaults to 10ms). Both intervals can be specified as time.Duration, parsable duration strings or floats/integers (in which case they are interpreted as seconds). In addition, an optional context.Context can be passed in - Eventually will keep trying until either the timeout expires or the context is cancelled, whichever comes first. Eventually works with any Gomega compatible matcher and supports making assertions against three categories of actual value: **Category 1: Assertions on values** There are several examples of values that can change over time. These can be passed in to Eventually and will be passed to the matcher repeatedly until a match occurs. For example: - c := make(chan bool) - go DoStuff(c) - Eventually(c, "50ms").Should(BeClosed()) + c := make(chan bool) + go DoStuff(c) + Eventually(c, "50ms").Should(BeClosed()) will poll the channel repeatedly until it is closed. In this example `Eventually` will block until either the specified timeout of 50ms has elapsed or the channel is closed, whichever comes first. 
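As a further hedged sketch of the context support mentioned above (reusing the illustrative DoStuff helper from the example; neither it nor the 50ms deadline is part of Gomega), the same channel poll can be bounded by cancellation instead of a duration string:

	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	c := make(chan bool)
	go DoStuff(c) // illustrative asynchronous work, as above
	Eventually(ctx, c).Should(BeClosed())

Here the assertion ends when the channel closes or, failing that, when the deadline cancels the context.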
-Several Gomega libraries allow you to use Eventually in this way. For example, the gomega/gexec package allows you to block until a *gexec.Session exits successfuly via: +Several Gomega libraries allow you to use Eventually in this way. For example, the gomega/gexec package allows you to block until a *gexec.Session exits successfully via: - Eventually(session).Should(gexec.Exit(0)) + Eventually(session).Should(gexec.Exit(0)) And the gomega/gbytes package allows you to monitor a streaming *gbytes.Buffer until a given string is seen: @@ -252,33 +275,57 @@ this will trigger Go's race detector as the goroutine polling via Eventually wil **Category 2: Make Eventually assertions on functions** -Eventually can be passed functions that **take no arguments** and **return at least one value**. When configured this way, Eventually will poll the function repeatedly and pass the first returned value to the matcher. +Eventually can be passed functions that **return at least one value**. When configured this way, Eventually will poll the function repeatedly and pass the first returned value to the matcher. For example: - Eventually(func() int { - return client.FetchCount() - }).Should(BeNumerically(">=", 17)) + Eventually(func() int { + return client.FetchCount() + }).Should(BeNumerically(">=", 17)) - will repeatedly poll client.FetchCount until the BeNumerically matcher is satisfied. (Note that this example could have been written as Eventually(client.FetchCount).Should(BeNumerically(">=", 17))) + will repeatedly poll client.FetchCount until the BeNumerically matcher is satisfied. (Note that this example could have been written as Eventually(client.FetchCount).Should(BeNumerically(">=", 17))) -If multple values are returned by the function, Eventually will pass the first value to the matcher and require that all others are zero-valued. This allows you to pass Eventually a function that returns a value and an error - a common patternin Go. +If multiple values are returned by the function, Eventually will pass the first value to the matcher and require that all others are zero-valued. This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go. For example, consider a method that returns a value and an error: - func FetchFromDB() (string, error) + + func FetchFromDB() (string, error) Then - Eventually(FetchFromDB).Should(Equal("got it")) + + Eventually(FetchFromDB).Should(Equal("got it")) will pass only if and when the returned error is nil *and* the returned string satisfies the matcher. -It is important to note that the function passed into Eventually is invoked *synchronously* when polled. Eventually does not (in fact, it cannot) kill the function if it takes longer to return than Eventually's configured timeout. You should design your functions with this in mind. +Eventually can also accept functions that take arguments, however you must provide those arguments using .WithArguments(). For example, consider a function that takes a user-id and makes a network request to fetch a full name: + + func FetchFullName(userId int) (string, error) + +You can poll this function like so: + + Eventually(FetchFullName).WithArguments(1138).Should(Equal("Wookie")) + +It is important to note that the function passed into Eventually is invoked *synchronously* when polled. Eventually does not (in fact, it cannot) kill the function if it takes longer to return than Eventually's configured timeout. A common practice here is to use a context. 
Here's an example that combines Ginkgo's spec timeout support with Eventually: + + It("fetches the correct count", func(ctx SpecContext) { + Eventually(ctx, func() int { + return client.FetchCount(ctx, "/users") + }).Should(BeNumerically(">=", 17)) + }, SpecTimeout(time.Second)) + +you can also use Eventually().WithContext(ctx) to pass in the context. Passed-in contexts play nicely with passed-in arguments as long as the context appears first. You can rewrite the above example as: + + It("fetches the correct count", func(ctx SpecContext) { + Eventually(client.FetchCount).WithContext(ctx).WithArguments("/users").Should(BeNumerically(">=", 17)) + }, SpecTimeout(time.Second)) + +Either way the context passed to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit. **Category 3: Making assertions _in_ the function passed into Eventually** When testing complex systems it can be valuable to assert that a _set_ of assertions passes Eventually. Eventually supports this by accepting functions that take a single Gomega argument and return zero or more values. -Here's an example that makes some asssertions and returns a value and error: +Here's an example that makes some assertions and returns a value and error: Eventually(func(g Gomega) (Widget, error) { ids, err := client.FetchIDs() @@ -292,22 +339,38 @@ will pass only if all the assertions in the polled function pass and the return Eventually also supports a special case polling function that takes a single Gomega argument and returns no values. Eventually assumes such a function is making assertions and is designed to work with the Succeed matcher to validate that all assertions have passed. For example: - Eventually(func(g Gomega) { - model, err := client.Find(1138) - g.Expect(err).NotTo(HaveOccurred()) - g.Expect(model.Reticulate()).To(Succeed()) - g.Expect(model.IsReticulated()).To(BeTrue()) - g.Expect(model.Save()).To(Succeed()) - }).Should(Succeed()) + Eventually(func(g Gomega) { + model, err := client.Find(1138) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(model.Reticulate()).To(Succeed()) + g.Expect(model.IsReticulated()).To(BeTrue()) + g.Expect(model.Save()).To(Succeed()) + }).Should(Succeed()) will rerun the function until all assertions pass. -`Eventually` specifying a timeout interval (and an optional polling interval) are -the same as `Eventually(...).WithTimeout` or `Eventually(...).WithTimeout(...).WithPolling`. +You can also pass additional arguments to functions that take a Gomega. The only rule is that the Gomega argument must be first. If you also want to pass the context attached to Eventually you must ensure that is the second argument. For example: + + Eventually(func(g Gomega, ctx context.Context, path string, expected ...string){ + tok, err := client.GetToken(ctx) + g.Expect(err).NotTo(HaveOccurred()) + + elements, err := client.Fetch(ctx, tok, path) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(elements).To(ConsistOf(expected)) + }).WithContext(ctx).WithArguments("/names", "Joe", "Jane", "Sam").Should(Succeed()) + +Finally, in addition to passing timeouts and a context to Eventually you can be more explicit with Eventually's chaining configuration methods: + + Eventually(..., "1s", "2s", ctx).Should(...) + +is equivalent to + + Eventually(...).WithTimeout(time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...) 
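To make that equivalence concrete, here is one more hedged sketch (client.FetchCount, ctx, and the intervals are the same illustrative stand-ins used throughout this comment):

	Eventually(client.FetchCount).
		WithContext(ctx).             // forwarded to FetchCount and used for cancellation
		WithTimeout(time.Second).     // equivalent to the "1s" argument
		WithPolling(2 * time.Second). // equivalent to the "2s" argument
		WithArguments("/users").
		Should(BeNumerically(">=", 17))

Each chained method returns the AsyncAssertion, so these configuration calls can appear in any order ahead of the final Should.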
*/ -func Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion { +func Eventually(args ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() - return Default.Eventually(actual, intervals...) + return Default.Eventually(args...) } // EventuallyWithOffset operates like Eventually but takes an additional @@ -319,9 +382,9 @@ func Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion { // `EventuallyWithOffset` specifying a timeout interval (and an optional polling interval) are // the same as `Eventually(...).WithOffset(...).WithTimeout` or // `Eventually(...).WithOffset(...).WithTimeout(...).WithPolling`. -func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion { +func EventuallyWithOffset(offset int, args ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() - return Default.EventuallyWithOffset(offset, actual, intervals...) + return Default.EventuallyWithOffset(offset, args...) } /* @@ -329,19 +392,19 @@ Consistently, like Eventually, enables making assertions on asynchronous behavio Consistently blocks when called for a specified duration. During that duration Consistently repeatedly polls its matcher and ensures that it is satisfied. If the matcher is consistently satisfied, then Consistently will pass. Otherwise Consistently will fail. -Both the total waiting duration and the polling interval are configurable as optional arguments. The first optional arugment is the duration that Consistently will run for (defaults to 100ms), and the second argument is the polling interval (defaults to 10ms). As with Eventually, these intervals can be passed in as time.Duration, parsable duration strings or an integer or float number of seconds. +Both the total waiting duration and the polling interval are configurable as optional arguments. The first optional argument is the duration that Consistently will run for (defaults to 100ms), and the second argument is the polling interval (defaults to 10ms). As with Eventually, these intervals can be passed in as time.Duration, parsable duration strings or an integer or float number of seconds. You can also pass in an optional context.Context - Consistently will exit early (with a failure) if the context is cancelled before the waiting duration expires. Consistently accepts the same three categories of actual as Eventually, check the Eventually docs to learn more. Consistently is useful in cases where you want to assert that something *does not happen* for a period of time. For example, you may want to assert that a goroutine does *not* send data down a channel. In this case you could write: - Consistently(channel, "200ms").ShouldNot(Receive()) + Consistently(channel, "200ms").ShouldNot(Receive()) This will block for 200 milliseconds and repeatedly check the channel and ensure nothing has been received. */ -func Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion { +func Consistently(args ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() - return Default.Consistently(actual, intervals...) + return Default.Consistently(args...) } // ConsistentlyWithOffset operates like Consistently but takes an additional @@ -350,11 +413,54 @@ func Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion { // // `ConsistentlyWithOffset` is the same as `Consistently(...).WithOffset` and // optional `WithTimeout` and `WithPolling`. 
-func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion {
+func ConsistentlyWithOffset(offset int, args ...interface{}) AsyncAssertion {
	ensureDefaultGomegaIsConfigured()
-	return Default.ConsistentlyWithOffset(offset, actual, intervals...)
+	return Default.ConsistentlyWithOffset(offset, args...)
}

+/*
+StopTrying can be used to signal to Eventually and Consistently that they should abort and stop trying. This always results in a failure of the assertion - and the failure message is the content of the StopTrying signal.
+
+You can send the StopTrying signal by either returning StopTrying("message") as an error from your passed-in function _or_ by calling StopTrying("message").Now() to trigger a panic and end execution.
+
+You can also wrap StopTrying around an error with `StopTrying("message").Wrap(err)` and can attach additional objects via `StopTrying("message").Attach("description", object)`. When rendered, the signal will include the wrapped error and any attached objects rendered using Gomega's default formatting.
+
+Here are a couple of examples. This is how you might use StopTrying() as an error to signal that Eventually should stop:
+
+	playerIndex, numPlayers := 0, 11
+	Eventually(func() (string, error) {
+		if playerIndex == numPlayers {
+			return "", StopTrying("no more players left")
+		}
+		name := client.FetchPlayer(playerIndex)
+		playerIndex += 1
+		return name, nil
+	}).Should(Equal("Patrick Mahomes"))
+
+And here's an example where `StopTrying().Now()` is called to halt execution immediately:
+
+	Eventually(func() []string {
+		names, err := client.FetchAllPlayers()
+		if err == client.IRRECOVERABLE_ERROR {
+			StopTrying("Irrecoverable error occurred").Wrap(err).Now()
+		}
+		return names
+	}).Should(ContainElement("Patrick Mahomes"))
+*/
+var StopTrying = internal.StopTrying
+
+/*
+TryAgainAfter() allows you to adjust the polling interval for the _next_ iteration of `Eventually` or `Consistently`. Like `StopTrying` you can either return `TryAgainAfter` as an error or trigger it immediately with `.Now()`.
+
+When `TryAgainAfter()` is triggered `Eventually` and `Consistently` will wait for that duration. If a timeout occurs before the next poll is triggered both `Eventually` and `Consistently` will always fail with the content of the TryAgainAfter message. As with StopTrying you can `.Wrap()` an error and `.Attach()` additional objects to `TryAgainAfter`.
+*/
+var TryAgainAfter = internal.TryAgainAfter
+
+/*
+PollingSignalError is the error returned by StopTrying() and TryAgainAfter().
+*/
+type PollingSignalError = internal.PollingSignalError
+
// SetDefaultEventuallyTimeout sets the default timeout duration for Eventually. Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses.
func SetDefaultEventuallyTimeout(t time.Duration) {
	Default.SetDefaultEventuallyTimeout(t)
@@ -388,8 +494,8 @@ func SetDefaultConsistentlyPollingInterval(t time.Duration) {
//
// Example:
//
-//	Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.")
-//	Consistently(myChannel).ShouldNot(Receive(), func() string { return "Nothing should have come down the pipe." })
+//	Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.")
+//	Consistently(myChannel).ShouldNot(Receive(), func() string { return "Nothing should have come down the pipe." })
type AsyncAssertion = types.AsyncAssertion

// GomegaAsyncAssertion is deprecated in favor of AsyncAssertion, which does not stutter.
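To make the polling-signal and context/argument plumbing above concrete, here is a minimal, self-contained sketch in a plain go test. The fetchCount helper and errThrottled sentinel are hypothetical stand-ins for illustration only; they are not part of the vendored library:

	package example_test

	import (
		"context"
		"errors"
		"testing"
		"time"

		. "github.com/onsi/gomega"
	)

	// Hypothetical flaky client used only for this sketch.
	var errThrottled = errors.New("throttled")

	func fetchCount(ctx context.Context, path string) (int, error) {
		return 17, nil // imagine a real network call here
	}

	func TestPollingSignals(t *testing.T) {
		g := NewWithT(t)
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		// The context attached via WithContext is forwarded as the
		// function's first argument; "/users" follows via WithArguments.
		g.Eventually(fetchCount).WithContext(ctx).WithArguments("/users").
			Should(BeNumerically(">=", 17))

		// Returning the polling signals as errors: back off when
		// throttled, abort outright after too many attempts.
		attempts := 0
		g.Eventually(func() (int, error) {
			attempts++
			if attempts > 10 {
				return 0, StopTrying("giving up after 10 attempts")
			}
			n, err := fetchCount(ctx, "/users")
			if errors.Is(err, errThrottled) {
				return 0, TryAgainAfter(200 * time.Millisecond)
			}
			return n, err
		}).Should(BeNumerically(">=", 17))
	}

Returning the signal as an error keeps the polled function's signature unchanged; per the docs above, .Now() is only needed when you want to halt execution immediately rather than return.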
@@ -411,7 +517,7 @@ type GomegaAsyncAssertion = types.AsyncAssertion // // Example: // -// Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm) +// Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm) type Assertion = types.Assertion // GomegaAssertion is deprecated in favor of Assertion, which does not stutter. diff --git a/vendor/github.com/onsi/gomega/internal/assertion.go b/vendor/github.com/onsi/gomega/internal/assertion.go index b3c26889a..08356a610 100644 --- a/vendor/github.com/onsi/gomega/internal/assertion.go +++ b/vendor/github.com/onsi/gomega/internal/assertion.go @@ -4,6 +4,7 @@ import ( "fmt" "reflect" + "github.com/onsi/gomega/format" "github.com/onsi/gomega/types" ) @@ -45,26 +46,31 @@ func (assertion *Assertion) Error() types.Assertion { func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { assertion.g.THelper() + vetOptionalDescription("Assertion", optionalDescription...) return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, true, optionalDescription...) } func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { assertion.g.THelper() + vetOptionalDescription("Assertion", optionalDescription...) return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...) } func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { assertion.g.THelper() + vetOptionalDescription("Assertion", optionalDescription...) return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, true, optionalDescription...) } func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { assertion.g.THelper() + vetOptionalDescription("Assertion", optionalDescription...) return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...) } func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { assertion.g.THelper() + vetOptionalDescription("Assertion", optionalDescription...) return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...) 
} @@ -141,7 +147,12 @@ func vetActuals(actuals []interface{}, skipIndex int) (bool, string) { if actual != nil { zeroValue := reflect.Zero(reflect.TypeOf(actual)).Interface() if !reflect.DeepEqual(zeroValue, actual) { - message := fmt.Sprintf("Unexpected non-nil/non-zero argument at index %d:\n\t<%T>: %#v", i, actual, actual) + var message string + if err, ok := actual.(error); ok { + message = fmt.Sprintf("Unexpected error: %s\n%s", err, format.Object(err, 1)) + } else { + message = fmt.Sprintf("Unexpected non-nil/non-zero argument at index %d:\n\t<%T>: %#v", i, actual, actual) + } return false, message } } diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go index 99f4ebcfe..c1e4a9995 100644 --- a/vendor/github.com/onsi/gomega/internal/async_assertion.go +++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -1,15 +1,25 @@ package internal import ( - "errors" + "context" "fmt" "reflect" "runtime" + "sync" "time" + "github.com/onsi/gomega/format" "github.com/onsi/gomega/types" ) +var errInterface = reflect.TypeOf((*error)(nil)).Elem() +var gomegaType = reflect.TypeOf((*types.Gomega)(nil)).Elem() +var contextType = reflect.TypeOf(new(context.Context)).Elem() + +type contextWithAttachProgressReporter interface { + AttachProgressReporter(func() string) func() +} + type AsyncAssertionType uint const ( @@ -17,71 +27,43 @@ const ( AsyncAssertionTypeConsistently ) +func (at AsyncAssertionType) String() string { + switch at { + case AsyncAssertionTypeEventually: + return "Eventually" + case AsyncAssertionTypeConsistently: + return "Consistently" + } + return "INVALID ASYNC ASSERTION TYPE" +} + type AsyncAssertion struct { asyncType AsyncAssertionType - actualIsFunc bool - actualValue interface{} - actualFunc func() ([]reflect.Value, error) + actualIsFunc bool + actual interface{} + argsToForward []interface{} timeoutInterval time.Duration pollingInterval time.Duration + ctx context.Context offset int g *Gomega } -func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput interface{}, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, offset int) *AsyncAssertion { +func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput interface{}, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, ctx context.Context, offset int) *AsyncAssertion { out := &AsyncAssertion{ asyncType: asyncType, timeoutInterval: timeoutInterval, pollingInterval: pollingInterval, offset: offset, + ctx: ctx, g: g, } - switch actualType := reflect.TypeOf(actualInput); { - case actualType.Kind() != reflect.Func: - out.actualValue = actualInput - case actualType.NumIn() == 0 && actualType.NumOut() > 0: - out.actualIsFunc = true - out.actualFunc = func() ([]reflect.Value, error) { - return reflect.ValueOf(actualInput).Call([]reflect.Value{}), nil - } - case actualType.NumIn() == 1 && actualType.In(0).Implements(reflect.TypeOf((*types.Gomega)(nil)).Elem()): + out.actual = actualInput + if actualInput != nil && reflect.TypeOf(actualInput).Kind() == reflect.Func { out.actualIsFunc = true - out.actualFunc = func() (values []reflect.Value, err error) { - var assertionFailure error - assertionCapturingGomega := NewGomega(g.DurationBundle).ConfigureWithFailHandler(func(message string, callerSkip ...int) { - skip := 0 - if len(callerSkip) > 0 { - skip = callerSkip[0] - } - _, file, line, _ := runtime.Caller(skip + 1) - assertionFailure = fmt.Errorf("Assertion in callback at %s:%d failed:\n%s", file, 
line, message) - panic("stop execution") - }) - - defer func() { - if actualType.NumOut() == 0 { - if assertionFailure == nil { - values = []reflect.Value{reflect.Zero(reflect.TypeOf((*error)(nil)).Elem())} - } else { - values = []reflect.Value{reflect.ValueOf(assertionFailure)} - } - } else { - err = assertionFailure - } - if e := recover(); e != nil && assertionFailure == nil { - panic(e) - } - }() - - values = reflect.ValueOf(actualInput).Call([]reflect.Value{reflect.ValueOf(assertionCapturingGomega)}) - return - } - default: - msg := fmt.Sprintf("The function passed to Gomega's async assertions should either take no arguments and return values, or take a single Gomega interface that it can use to make assertions within the body of the function. When taking a Gomega interface the function can optionally return values or return nothing. The function you passed takes %d arguments and returns %d values.", actualType.NumIn(), actualType.NumOut()) - g.Fail(msg, offset+4) } return out @@ -102,13 +84,35 @@ func (assertion *AsyncAssertion) WithPolling(interval time.Duration) types.Async return assertion } +func (assertion *AsyncAssertion) Within(timeout time.Duration) types.AsyncAssertion { + assertion.timeoutInterval = timeout + return assertion +} + +func (assertion *AsyncAssertion) ProbeEvery(interval time.Duration) types.AsyncAssertion { + assertion.pollingInterval = interval + return assertion +} + +func (assertion *AsyncAssertion) WithContext(ctx context.Context) types.AsyncAssertion { + assertion.ctx = ctx + return assertion +} + +func (assertion *AsyncAssertion) WithArguments(argsToForward ...interface{}) types.AsyncAssertion { + assertion.argsToForward = argsToForward + return assertion +} + func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { assertion.g.THelper() + vetOptionalDescription("Asynchronous assertion", optionalDescription...) return assertion.match(matcher, true, optionalDescription...) } func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { assertion.g.THelper() + vetOptionalDescription("Asynchronous assertion", optionalDescription...) return assertion.match(matcher, false, optionalDescription...) } @@ -124,54 +128,237 @@ func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interfa return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) 
+ "\n"
}

-func (assertion *AsyncAssertion) pollActual() (interface{}, error) {
+func (assertion *AsyncAssertion) processReturnValues(values []reflect.Value) (interface{}, error) {
+	if len(values) == 0 {
+		return nil, fmt.Errorf("No values were returned by the function passed to Gomega")
+	}
+
+	actual := values[0].Interface()
+	if _, ok := AsPollingSignalError(actual); ok {
+		return actual, actual.(error)
+	}
+
+	var err error
+	for i, extraValue := range values[1:] {
+		extra := extraValue.Interface()
+		if extra == nil {
+			continue
+		}
+		if _, ok := AsPollingSignalError(extra); ok {
+			return actual, extra.(error)
+		}
+		extraType := reflect.TypeOf(extra)
+		zero := reflect.Zero(extraType).Interface()
+		if reflect.DeepEqual(extra, zero) {
+			continue
+		}
+		if i == len(values)-2 && extraType.Implements(errInterface) {
+			err = fmt.Errorf("function returned error: %w", extra.(error))
+		}
+		if err == nil {
+			err = fmt.Errorf("Unexpected non-nil/non-zero return value at index %d:\n\t<%T>: %#v", i+1, extra, extra)
+		}
+	}
+
+	return actual, err
+}
+
+func (assertion *AsyncAssertion) invalidFunctionError(t reflect.Type) error {
+	return fmt.Errorf(`The function passed to %s had an invalid signature of %s. Functions passed to %s must either:
+
+	(a) have return values or
+	(b) take a Gomega interface as their first argument and use that Gomega instance to make assertions.
+
+You can learn more at https://onsi.github.io/gomega/#eventually
+`, assertion.asyncType, t, assertion.asyncType)
+}
+
+func (assertion *AsyncAssertion) noConfiguredContextForFunctionError() error {
+	return fmt.Errorf(`The function passed to %s requested a context.Context, but no context has been provided. Please pass one in using %s().WithContext().
+
+You can learn more at https://onsi.github.io/gomega/#eventually
+`, assertion.asyncType, assertion.asyncType)
+}
+
+func (assertion *AsyncAssertion) argumentMismatchError(t reflect.Type, numProvided int) error {
+	have := "have"
+	if numProvided == 1 {
+		have = "has"
+	}
+	return fmt.Errorf(`The function passed to %s has signature %s and takes %d arguments but %d %s been provided. Please use %s().WithArguments() to pass the correct set of arguments.
+ +You can learn more at https://onsi.github.io/gomega/#eventually +`, assertion.asyncType, t, t.NumIn(), numProvided, have, assertion.asyncType) +} + +func (assertion *AsyncAssertion) buildActualPoller() (func() (interface{}, error), error) { if !assertion.actualIsFunc { - return assertion.actualValue, nil + return func() (interface{}, error) { return assertion.actual, nil }, nil } + actualValue := reflect.ValueOf(assertion.actual) + actualType := reflect.TypeOf(assertion.actual) + numIn, numOut, isVariadic := actualType.NumIn(), actualType.NumOut(), actualType.IsVariadic() - values, err := assertion.actualFunc() - if err != nil { - return nil, err + if numIn == 0 && numOut == 0 { + return nil, assertion.invalidFunctionError(actualType) + } + takesGomega, takesContext := false, false + if numIn > 0 { + takesGomega, takesContext = actualType.In(0).Implements(gomegaType), actualType.In(0).Implements(contextType) + } + if takesGomega && numIn > 1 && actualType.In(1).Implements(contextType) { + takesContext = true + } + if takesContext && len(assertion.argsToForward) > 0 && reflect.TypeOf(assertion.argsToForward[0]).Implements(contextType) { + takesContext = false + } + if !takesGomega && numOut == 0 { + return nil, assertion.invalidFunctionError(actualType) } - extras := []interface{}{nil} - for _, value := range values[1:] { - extras = append(extras, value.Interface()) + if takesContext && assertion.ctx == nil { + return nil, assertion.noConfiguredContextForFunctionError() } - success, message := vetActuals(extras, 0) - if !success { - return nil, errors.New(message) + + var assertionFailure error + inValues := []reflect.Value{} + if takesGomega { + inValues = append(inValues, reflect.ValueOf(NewGomega(assertion.g.DurationBundle).ConfigureWithFailHandler(func(message string, callerSkip ...int) { + skip := 0 + if len(callerSkip) > 0 { + skip = callerSkip[0] + } + _, file, line, _ := runtime.Caller(skip + 1) + assertionFailure = fmt.Errorf("Assertion in callback at %s:%d failed:\n%s", file, line, message) + panic("stop execution") + }))) + } + if takesContext { + inValues = append(inValues, reflect.ValueOf(assertion.ctx)) + } + for _, arg := range assertion.argsToForward { + inValues = append(inValues, reflect.ValueOf(arg)) } - return values[0].Interface(), nil + if !isVariadic && numIn != len(inValues) { + return nil, assertion.argumentMismatchError(actualType, len(inValues)) + } else if isVariadic && len(inValues) < numIn-1 { + return nil, assertion.argumentMismatchError(actualType, len(inValues)) + } + + return func() (actual interface{}, err error) { + var values []reflect.Value + assertionFailure = nil + defer func() { + if numOut == 0 && takesGomega { + actual = assertionFailure + } else { + actual, err = assertion.processReturnValues(values) + _, isAsyncError := AsPollingSignalError(err) + if assertionFailure != nil && !isAsyncError { + err = assertionFailure + } + } + if e := recover(); e != nil { + if _, isAsyncError := AsPollingSignalError(e); isAsyncError { + err = e.(error) + } else if assertionFailure == nil { + panic(e) + } + } + }() + values = actualValue.Call(inValues) + return + }, nil } -func (assertion *AsyncAssertion) matcherMayChange(matcher types.GomegaMatcher, value interface{}) bool { - if assertion.actualIsFunc { - return true +func (assertion *AsyncAssertion) afterTimeout() <-chan time.Time { + if assertion.timeoutInterval >= 0 { + return time.After(assertion.timeoutInterval) + } + + if assertion.asyncType == AsyncAssertionTypeConsistently { + return 
time.After(assertion.g.DurationBundle.ConsistentlyDuration) + } else { + if assertion.ctx == nil { + return time.After(assertion.g.DurationBundle.EventuallyTimeout) + } else { + return nil + } } - return types.MatchMayChangeInTheFuture(matcher, value) +} + +func (assertion *AsyncAssertion) afterPolling() <-chan time.Time { + if assertion.pollingInterval >= 0 { + return time.After(assertion.pollingInterval) + } + if assertion.asyncType == AsyncAssertionTypeConsistently { + return time.After(assertion.g.DurationBundle.ConsistentlyPollingInterval) + } else { + return time.After(assertion.g.DurationBundle.EventuallyPollingInterval) + } +} + +func (assertion *AsyncAssertion) matcherSaysStopTrying(matcher types.GomegaMatcher, value interface{}) bool { + if assertion.actualIsFunc || types.MatchMayChangeInTheFuture(matcher, value) { + return false + } + return true +} + +func (assertion *AsyncAssertion) pollMatcher(matcher types.GomegaMatcher, value interface{}) (matches bool, err error) { + defer func() { + if e := recover(); e != nil { + if _, isAsyncError := AsPollingSignalError(e); isAsyncError { + err = e.(error) + } else { + panic(e) + } + } + }() + + matches, err = matcher.Match(value) + + return } func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { timer := time.Now() - timeout := time.After(assertion.timeoutInterval) + timeout := assertion.afterTimeout() + lock := sync.Mutex{} var matches bool var err error - mayChange := true - value, err := assertion.pollActual() - if err == nil { - mayChange = assertion.matcherMayChange(matcher, value) - matches, err = matcher.Match(value) - } + var oracleMatcherSaysStop bool assertion.g.THelper() - fail := func(preamble string) { - errMsg := "" + pollActual, err := assertion.buildActualPoller() + if err != nil { + assertion.g.Fail(err.Error(), 2+assertion.offset) + return false + } + + value, err := pollActual() + if err == nil { + oracleMatcherSaysStop = assertion.matcherSaysStopTrying(matcher, value) + matches, err = assertion.pollMatcher(matcher, value) + } + + messageGenerator := func() string { + // can be called out of band by Ginkgo if the user requests a progress report + lock.Lock() + defer lock.Unlock() message := "" if err != nil { - errMsg = "Error: " + err.Error() + if pollingSignalErr, ok := AsPollingSignalError(err); ok && pollingSignalErr.IsStopTrying() { + message = err.Error() + for _, attachment := range pollingSignalErr.Attachments { + message += fmt.Sprintf("\n%s:\n", attachment.Description) + message += format.Object(attachment.Object, 1) + } + } else { + message = "Error: " + err.Error() + "\n" + format.Object(err, 1) + } } else { if desiredMatch { message = matcher.FailureMessage(value) @@ -179,57 +366,90 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch message = matcher.NegatedFailureMessage(value) } } - assertion.g.THelper() description := assertion.buildDescription(optionalDescription...) 
- assertion.g.Fail(fmt.Sprintf("%s after %.3fs.\n%s%s%s", preamble, time.Since(timer).Seconds(), description, message, errMsg), 3+assertion.offset) + return fmt.Sprintf("%s%s", description, message) } - if assertion.asyncType == AsyncAssertionTypeEventually { - for { - if err == nil && matches == desiredMatch { - return true - } + fail := func(preamble string) { + assertion.g.THelper() + assertion.g.Fail(fmt.Sprintf("%s after %.3fs.\n%s", preamble, time.Since(timer).Seconds(), messageGenerator()), 3+assertion.offset) + } - if !mayChange { - fail("No future change is possible. Bailing out early") - return false - } + var contextDone <-chan struct{} + if assertion.ctx != nil { + contextDone = assertion.ctx.Done() + if v, ok := assertion.ctx.Value("GINKGO_SPEC_CONTEXT").(contextWithAttachProgressReporter); ok { + detach := v.AttachProgressReporter(messageGenerator) + defer detach() + } + } - select { - case <-time.After(assertion.pollingInterval): - value, err = assertion.pollActual() - if err == nil { - mayChange = assertion.matcherMayChange(matcher, value) - matches, err = matcher.Match(value) - } - case <-timeout: - fail("Timed out") + for { + var nextPoll <-chan time.Time = nil + var isTryAgainAfterError = false + + if pollingSignalErr, ok := AsPollingSignalError(err); ok { + if pollingSignalErr.IsStopTrying() { + fail("Told to stop trying") return false } + if pollingSignalErr.IsTryAgainAfter() { + nextPoll = time.After(pollingSignalErr.TryAgainDuration()) + isTryAgainAfterError = true + } } - } else if assertion.asyncType == AsyncAssertionTypeConsistently { - for { - if !(err == nil && matches == desiredMatch) { + + if err == nil && matches == desiredMatch { + if assertion.asyncType == AsyncAssertionTypeEventually { + return true + } + } else if !isTryAgainAfterError { + if assertion.asyncType == AsyncAssertionTypeConsistently { fail("Failed") return false } + } - if !mayChange { + if oracleMatcherSaysStop { + if assertion.asyncType == AsyncAssertionTypeEventually { + fail("No future change is possible. 
Bailing out early") + return false + } else { return true } + } + + if nextPoll == nil { + nextPoll = assertion.afterPolling() + } - select { - case <-time.After(assertion.pollingInterval): - value, err = assertion.pollActual() - if err == nil { - mayChange = assertion.matcherMayChange(matcher, value) - matches, err = matcher.Match(value) + select { + case <-nextPoll: + v, e := pollActual() + lock.Lock() + value, err = v, e + lock.Unlock() + if err == nil { + oracleMatcherSaysStop = assertion.matcherSaysStopTrying(matcher, value) + m, e := assertion.pollMatcher(matcher, value) + lock.Lock() + matches, err = m, e + lock.Unlock() + } + case <-contextDone: + fail("Context was cancelled") + return false + case <-timeout: + if assertion.asyncType == AsyncAssertionTypeEventually { + fail("Timed out") + return false + } else { + if isTryAgainAfterError { + fail("Timed out while waiting on TryAgainAfter") + return false } - case <-timeout: return true } } } - - return false } diff --git a/vendor/github.com/onsi/gomega/internal/duration_bundle.go b/vendor/github.com/onsi/gomega/internal/duration_bundle.go index af8d989fa..6e0d90d3a 100644 --- a/vendor/github.com/onsi/gomega/internal/duration_bundle.go +++ b/vendor/github.com/onsi/gomega/internal/duration_bundle.go @@ -44,28 +44,28 @@ func durationFromEnv(key string, defaultDuration time.Duration) time.Duration { return duration } -func toDuration(input interface{}) time.Duration { +func toDuration(input interface{}) (time.Duration, error) { duration, ok := input.(time.Duration) if ok { - return duration + return duration, nil } value := reflect.ValueOf(input) kind := reflect.TypeOf(input).Kind() if reflect.Int <= kind && kind <= reflect.Int64 { - return time.Duration(value.Int()) * time.Second + return time.Duration(value.Int()) * time.Second, nil } else if reflect.Uint <= kind && kind <= reflect.Uint64 { - return time.Duration(value.Uint()) * time.Second + return time.Duration(value.Uint()) * time.Second, nil } else if reflect.Float32 <= kind && kind <= reflect.Float64 { - return time.Duration(value.Float() * float64(time.Second)) + return time.Duration(value.Float() * float64(time.Second)), nil } else if reflect.String == kind { duration, err := time.ParseDuration(value.String()) if err != nil { - panic(fmt.Sprintf("%#v is not a valid parsable duration string.", input)) + return 0, fmt.Errorf("%#v is not a valid parsable duration string: %w", input, err) } - return duration + return duration, nil } - panic(fmt.Sprintf("%v is not a valid interval. Must be time.Duration, parsable duration string or a number.", input)) + return 0, fmt.Errorf("%#v is not a valid interval. Must be a time.Duration, a parsable duration string, or a number.", input) } diff --git a/vendor/github.com/onsi/gomega/internal/gomega.go b/vendor/github.com/onsi/gomega/internal/gomega.go index d26a67485..e75d2626a 100644 --- a/vendor/github.com/onsi/gomega/internal/gomega.go +++ b/vendor/github.com/onsi/gomega/internal/gomega.go @@ -1,6 +1,8 @@ package internal import ( + "context" + "fmt" "time" "github.com/onsi/gomega/types" @@ -51,38 +53,68 @@ func (g *Gomega) ExpectWithOffset(offset int, actual interface{}, extra ...inter return NewAssertion(actual, g, offset, extra...) } -func (g *Gomega) Eventually(actual interface{}, intervals ...interface{}) types.AsyncAssertion { - return g.EventuallyWithOffset(0, actual, intervals...) +func (g *Gomega) Eventually(args ...interface{}) types.AsyncAssertion { + return g.makeAsyncAssertion(AsyncAssertionTypeEventually, 0, args...) 
}

-func (g *Gomega) EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) types.AsyncAssertion {
-	timeoutInterval := g.DurationBundle.EventuallyTimeout
-	pollingInterval := g.DurationBundle.EventuallyPollingInterval
-	if len(intervals) > 0 {
-		timeoutInterval = toDuration(intervals[0])
-	}
-	if len(intervals) > 1 {
-		pollingInterval = toDuration(intervals[1])
-	}
+func (g *Gomega) EventuallyWithOffset(offset int, args ...interface{}) types.AsyncAssertion {
+	return g.makeAsyncAssertion(AsyncAssertionTypeEventually, offset, args...)
+}

-	return NewAsyncAssertion(AsyncAssertionTypeEventually, actual, g, timeoutInterval, pollingInterval, offset)
+func (g *Gomega) Consistently(args ...interface{}) types.AsyncAssertion {
+	return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, 0, args...)
}

-func (g *Gomega) Consistently(actual interface{}, intervals ...interface{}) types.AsyncAssertion {
-	return g.ConsistentlyWithOffset(0, actual, intervals...)
+func (g *Gomega) ConsistentlyWithOffset(offset int, args ...interface{}) types.AsyncAssertion {
+	return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, offset, args...)
}

-func (g *Gomega) ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) types.AsyncAssertion {
-	timeoutInterval := g.DurationBundle.ConsistentlyDuration
-	pollingInterval := g.DurationBundle.ConsistentlyPollingInterval
+func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, args ...interface{}) types.AsyncAssertion {
+	baseOffset := 3
+	timeoutInterval := -time.Duration(1)
+	pollingInterval := -time.Duration(1)
+	intervals := []interface{}{}
+	var ctx context.Context
+	if len(args) == 0 {
+		g.Fail(fmt.Sprintf("Call to %s is missing a value or function to poll", asyncAssertionType), offset+baseOffset)
+		return nil
+	}
+
+	actual := args[0]
+	startingIndex := 1
+	if _, isCtx := args[0].(context.Context); isCtx && len(args) > 1 {
+		// the first argument is a context; we should accept it as the context _only if_ it is **not** the only argument **and** the second argument is not a parseable duration
+		// this is due to an unfortunate ambiguity in early versions of Gomega in which multi-type durations are allowed after the actual
+		if _, err := toDuration(args[1]); err != nil {
+			ctx = args[0].(context.Context)
+			actual = args[1]
+			startingIndex = 2
+		}
+	}
+
+	for _, arg := range args[startingIndex:] {
+		switch v := arg.(type) {
+		case context.Context:
+			ctx = v
+		default:
+			intervals = append(intervals, arg)
+		}
+	}
+	var err error
	if len(intervals) > 0 {
-		timeoutInterval = toDuration(intervals[0])
+		timeoutInterval, err = toDuration(intervals[0])
+		if err != nil {
+			g.Fail(err.Error(), offset+baseOffset)
+		}
	}
	if len(intervals) > 1 {
-		pollingInterval = toDuration(intervals[1])
+		pollingInterval, err = toDuration(intervals[1])
+		if err != nil {
+			g.Fail(err.Error(), offset+baseOffset)
+		}
	}

-	return NewAsyncAssertion(AsyncAssertionTypeConsistently, actual, g, timeoutInterval, pollingInterval, offset)
+	return NewAsyncAssertion(asyncAssertionType, actual, g, timeoutInterval, pollingInterval, ctx, offset)
}

func (g *Gomega) SetDefaultEventuallyTimeout(t time.Duration) {
diff --git a/vendor/github.com/onsi/gomega/internal/polling_signal_error.go b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go
new file mode 100644
index 000000000..83b04b1a4
--- /dev/null
+++ b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go
@@ -0,0 +1,106 @@
+package internal
+
+import (
+
"errors" + "fmt" + "time" +) + +type PollingSignalErrorType int + +const ( + PollingSignalErrorTypeStopTrying PollingSignalErrorType = iota + PollingSignalErrorTypeTryAgainAfter +) + +type PollingSignalError interface { + error + Wrap(err error) PollingSignalError + Attach(description string, obj any) PollingSignalError + Now() +} + +var StopTrying = func(message string) PollingSignalError { + return &PollingSignalErrorImpl{ + message: message, + pollingSignalErrorType: PollingSignalErrorTypeStopTrying, + } +} + +var TryAgainAfter = func(duration time.Duration) PollingSignalError { + return &PollingSignalErrorImpl{ + message: fmt.Sprintf("told to try again after %s", duration), + duration: duration, + pollingSignalErrorType: PollingSignalErrorTypeTryAgainAfter, + } +} + +type PollingSignalErrorAttachment struct { + Description string + Object any +} + +type PollingSignalErrorImpl struct { + message string + wrappedErr error + pollingSignalErrorType PollingSignalErrorType + duration time.Duration + Attachments []PollingSignalErrorAttachment +} + +func (s *PollingSignalErrorImpl) Wrap(err error) PollingSignalError { + s.wrappedErr = err + return s +} + +func (s *PollingSignalErrorImpl) Attach(description string, obj any) PollingSignalError { + s.Attachments = append(s.Attachments, PollingSignalErrorAttachment{description, obj}) + return s +} + +func (s *PollingSignalErrorImpl) Error() string { + if s.wrappedErr == nil { + return s.message + } else { + return s.message + ": " + s.wrappedErr.Error() + } +} + +func (s *PollingSignalErrorImpl) Unwrap() error { + if s == nil { + return nil + } + return s.wrappedErr +} + +func (s *PollingSignalErrorImpl) Now() { + panic(s) +} + +func (s *PollingSignalErrorImpl) IsStopTrying() bool { + return s.pollingSignalErrorType == PollingSignalErrorTypeStopTrying +} + +func (s *PollingSignalErrorImpl) IsTryAgainAfter() bool { + return s.pollingSignalErrorType == PollingSignalErrorTypeTryAgainAfter +} + +func (s *PollingSignalErrorImpl) TryAgainDuration() time.Duration { + return s.duration +} + +func AsPollingSignalError(actual interface{}) (*PollingSignalErrorImpl, bool) { + if actual == nil { + return nil, false + } + if actualErr, ok := actual.(error); ok { + var target *PollingSignalErrorImpl + if errors.As(actualErr, &target) { + return target, true + } else { + return nil, false + } + } + + return nil, false +} diff --git a/vendor/github.com/onsi/gomega/internal/vetoptdesc.go b/vendor/github.com/onsi/gomega/internal/vetoptdesc.go new file mode 100644 index 000000000..f29587641 --- /dev/null +++ b/vendor/github.com/onsi/gomega/internal/vetoptdesc.go @@ -0,0 +1,22 @@ +package internal + +import ( + "fmt" + + "github.com/onsi/gomega/types" +) + +// vetOptionalDescription vets the optional description args: if it finds any +// Gomega matcher at the beginning it panics. This allows for rendering Gomega +// matchers as part of an optional Description, as long as they're not in the +// first slot. 
+func vetOptionalDescription(assertion string, optionalDescription ...interface{}) { + if len(optionalDescription) == 0 { + return + } + if _, isGomegaMatcher := optionalDescription[0].(types.GomegaMatcher); isGomegaMatcher { + panic(fmt.Sprintf("%s has a GomegaMatcher as the first element of optionalDescription.\n\t"+ + "Do you mean to use And/Or/SatisfyAll/SatisfyAny to combine multiple matchers?", + assertion)) + } +} diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index b46e461a5..f9d9f2aad 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -3,139 +3,157 @@ package gomega import ( "time" + "github.com/google/go-cmp/cmp" "github.com/onsi/gomega/matchers" "github.com/onsi/gomega/types" ) -//Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about -//types when performing comparisons. -//It is an error for both actual and expected to be nil. Use BeNil() instead. +// Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about +// types when performing comparisons. +// It is an error for both actual and expected to be nil. Use BeNil() instead. func Equal(expected interface{}) types.GomegaMatcher { return &matchers.EqualMatcher{ Expected: expected, } } -//BeEquivalentTo is more lax than Equal, allowing equality between different types. -//This is done by converting actual to have the type of expected before -//attempting equality with reflect.DeepEqual. -//It is an error for actual and expected to be nil. Use BeNil() instead. +// BeEquivalentTo is more lax than Equal, allowing equality between different types. +// This is done by converting actual to have the type of expected before +// attempting equality with reflect.DeepEqual. +// It is an error for actual and expected to be nil. Use BeNil() instead. func BeEquivalentTo(expected interface{}) types.GomegaMatcher { return &matchers.BeEquivalentToMatcher{ Expected: expected, } } -//BeIdenticalTo uses the == operator to compare actual with expected. -//BeIdenticalTo is strict about types when performing comparisons. -//It is an error for both actual and expected to be nil. Use BeNil() instead. +// BeComparableTo uses gocmp.Equal to compare. You can pass cmp.Option as options. +// It is an error for actual and expected to be nil. Use BeNil() instead. +func BeComparableTo(expected interface{}, opts ...cmp.Option) types.GomegaMatcher { + return &matchers.BeComparableToMatcher{ + Expected: expected, + Options: opts, + } +} + +// BeIdenticalTo uses the == operator to compare actual with expected. +// BeIdenticalTo is strict about types when performing comparisons. +// It is an error for both actual and expected to be nil. Use BeNil() instead. 
func BeIdenticalTo(expected interface{}) types.GomegaMatcher { return &matchers.BeIdenticalToMatcher{ Expected: expected, } } -//BeNil succeeds if actual is nil +// BeNil succeeds if actual is nil func BeNil() types.GomegaMatcher { return &matchers.BeNilMatcher{} } -//BeTrue succeeds if actual is true +// BeTrue succeeds if actual is true func BeTrue() types.GomegaMatcher { return &matchers.BeTrueMatcher{} } -//BeFalse succeeds if actual is false +// BeFalse succeeds if actual is false func BeFalse() types.GomegaMatcher { return &matchers.BeFalseMatcher{} } -//HaveOccurred succeeds if actual is a non-nil error -//The typical Go error checking pattern looks like: -// err := SomethingThatMightFail() -// Expect(err).ShouldNot(HaveOccurred()) +// HaveOccurred succeeds if actual is a non-nil error +// The typical Go error checking pattern looks like: +// +// err := SomethingThatMightFail() +// Expect(err).ShouldNot(HaveOccurred()) func HaveOccurred() types.GomegaMatcher { return &matchers.HaveOccurredMatcher{} } -//Succeed passes if actual is a nil error -//Succeed is intended to be used with functions that return a single error value. Instead of -// err := SomethingThatMightFail() -// Expect(err).ShouldNot(HaveOccurred()) +// Succeed passes if actual is a nil error +// Succeed is intended to be used with functions that return a single error value. Instead of +// +// err := SomethingThatMightFail() +// Expect(err).ShouldNot(HaveOccurred()) // -//You can write: -// Expect(SomethingThatMightFail()).Should(Succeed()) +// You can write: // -//It is a mistake to use Succeed with a function that has multiple return values. Gomega's Ω and Expect -//functions automatically trigger failure if any return values after the first return value are non-zero/non-nil. -//This means that Ω(MultiReturnFunc()).ShouldNot(Succeed()) can never pass. +// Expect(SomethingThatMightFail()).Should(Succeed()) +// +// It is a mistake to use Succeed with a function that has multiple return values. Gomega's Ω and Expect +// functions automatically trigger failure if any return values after the first return value are non-zero/non-nil. +// This means that Ω(MultiReturnFunc()).ShouldNot(Succeed()) can never pass. func Succeed() types.GomegaMatcher { return &matchers.SucceedMatcher{} } -//MatchError succeeds if actual is a non-nil error that matches the passed in string/error. +// MatchError succeeds if actual is a non-nil error that matches the passed in string/error. +// +// These are valid use-cases: // -//These are valid use-cases: -// Expect(err).Should(MatchError("an error")) //asserts that err.Error() == "an error" -// Expect(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual) +// Expect(err).Should(MatchError("an error")) //asserts that err.Error() == "an error" +// Expect(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual) // -//It is an error for err to be nil or an object that does not implement the Error interface +// It is an error for err to be nil or an object that does not implement the Error interface func MatchError(expected interface{}) types.GomegaMatcher { return &matchers.MatchErrorMatcher{ Expected: expected, } } -//BeClosed succeeds if actual is a closed channel. -//It is an error to pass a non-channel to BeClosed, it is also an error to pass nil +// BeClosed succeeds if actual is a closed channel. 
+// It is an error to pass a non-channel to BeClosed, it is also an error to pass nil // -//In order to check whether or not the channel is closed, Gomega must try to read from the channel -//(even in the `ShouldNot(BeClosed())` case). You should keep this in mind if you wish to make subsequent assertions about -//values coming down the channel. +// In order to check whether or not the channel is closed, Gomega must try to read from the channel +// (even in the `ShouldNot(BeClosed())` case). You should keep this in mind if you wish to make subsequent assertions about +// values coming down the channel. // -//Also, if you are testing that a *buffered* channel is closed you must first read all values out of the channel before -//asserting that it is closed (it is not possible to detect that a buffered-channel has been closed until all its buffered values are read). +// Also, if you are testing that a *buffered* channel is closed you must first read all values out of the channel before +// asserting that it is closed (it is not possible to detect that a buffered-channel has been closed until all its buffered values are read). // -//Finally, as a corollary: it is an error to check whether or not a send-only channel is closed. +// Finally, as a corollary: it is an error to check whether or not a send-only channel is closed. func BeClosed() types.GomegaMatcher { return &matchers.BeClosedMatcher{} } -//Receive succeeds if there is a value to be received on actual. -//Actual must be a channel (and cannot be a send-only channel) -- anything else is an error. +// Receive succeeds if there is a value to be received on actual. +// Actual must be a channel (and cannot be a send-only channel) -- anything else is an error. +// +// Receive returns immediately and never blocks: +// +// - If there is nothing on the channel `c` then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass. +// +// - If the channel `c` is closed then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass. +// +// - If there is something on the channel `c` ready to be read, then Expect(c).Should(Receive()) will pass and Ω(c).ShouldNot(Receive()) will fail. // -//Receive returns immediately and never blocks: +// If you have a go-routine running in the background that will write to channel `c` you can: // -//- If there is nothing on the channel `c` then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass. +// Eventually(c).Should(Receive()) // -//- If the channel `c` is closed then Expect(c).Should(Receive()) will fail and Ω(c).ShouldNot(Receive()) will pass. +// This will timeout if nothing gets sent to `c` (you can modify the timeout interval as you normally do with `Eventually`) // -//- If there is something on the channel `c` ready to be read, then Expect(c).Should(Receive()) will pass and Ω(c).ShouldNot(Receive()) will fail. +// A similar use-case is to assert that no go-routine writes to a channel (for a period of time). You can do this with `Consistently`: // -//If you have a go-routine running in the background that will write to channel `c` you can: -// Eventually(c).Should(Receive()) +// Consistently(c).ShouldNot(Receive()) // -//This will timeout if nothing gets sent to `c` (you can modify the timeout interval as you normally do with `Eventually`) +// You can pass `Receive` a matcher. If you do so, it will match the received object against the matcher. 
For example: // -//A similar use-case is to assert that no go-routine writes to a channel (for a period of time). You can do this with `Consistently`: -// Consistently(c).ShouldNot(Receive()) +// Expect(c).Should(Receive(Equal("foo"))) // -//You can pass `Receive` a matcher. If you do so, it will match the received object against the matcher. For example: -// Expect(c).Should(Receive(Equal("foo"))) +// When given a matcher, `Receive` will always fail if there is nothing to be received on the channel. // -//When given a matcher, `Receive` will always fail if there is nothing to be received on the channel. +// Passing Receive a matcher is especially useful when paired with Eventually: // -//Passing Receive a matcher is especially useful when paired with Eventually: +// Eventually(c).Should(Receive(ContainSubstring("bar"))) // -// Eventually(c).Should(Receive(ContainSubstring("bar"))) +// will repeatedly attempt to pull values out of `c` until a value matching "bar" is received. // -//will repeatedly attempt to pull values out of `c` until a value matching "bar" is received. +// Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type: // -//Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type: -// var myThing thing -// Eventually(thingChan).Should(Receive(&myThing)) -// Expect(myThing.Sprocket).Should(Equal("foo")) -// Expect(myThing.IsValid()).Should(BeTrue()) +// var myThing thing +// Eventually(thingChan).Should(Receive(&myThing)) +// Expect(myThing.Sprocket).Should(Equal("foo")) +// Expect(myThing.IsValid()).Should(BeTrue()) func Receive(args ...interface{}) types.GomegaMatcher { var arg interface{} if len(args) > 0 { @@ -147,27 +165,27 @@ func Receive(args ...interface{}) types.GomegaMatcher { } } -//BeSent succeeds if a value can be sent to actual. -//Actual must be a channel (and cannot be a receive-only channel) that can sent the type of the value passed into BeSent -- anything else is an error. -//In addition, actual must not be closed. +// BeSent succeeds if a value can be sent to actual. +// Actual must be a channel (and cannot be a receive-only channel) that can sent the type of the value passed into BeSent -- anything else is an error. +// In addition, actual must not be closed. // -//BeSent never blocks: +// BeSent never blocks: // -//- If the channel `c` is not ready to receive then Expect(c).Should(BeSent("foo")) will fail immediately -//- If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed.. presuming the channel becomes ready to receive before Eventually's timeout -//- If the channel `c` is closed then Expect(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately +// - If the channel `c` is not ready to receive then Expect(c).Should(BeSent("foo")) will fail immediately +// - If the channel `c` is eventually ready to receive then Eventually(c).Should(BeSent("foo")) will succeed.. presuming the channel becomes ready to receive before Eventually's timeout +// - If the channel `c` is closed then Expect(c).Should(BeSent("foo")) and Ω(c).ShouldNot(BeSent("foo")) will both fail immediately // -//Of course, the value is actually sent to the channel. 
The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with). -//Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends. +// Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with). +// Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends. func BeSent(arg interface{}) types.GomegaMatcher { return &matchers.BeSentMatcher{ Arg: arg, } } -//MatchRegexp succeeds if actual is a string or stringer that matches the -//passed-in regexp. Optional arguments can be provided to construct a regexp -//via fmt.Sprintf(). +// MatchRegexp succeeds if actual is a string or stringer that matches the +// passed-in regexp. Optional arguments can be provided to construct a regexp +// via fmt.Sprintf(). func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher { return &matchers.MatchRegexpMatcher{ Regexp: regexp, @@ -175,9 +193,9 @@ func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher { } } -//ContainSubstring succeeds if actual is a string or stringer that contains the -//passed-in substring. Optional arguments can be provided to construct the substring -//via fmt.Sprintf(). +// ContainSubstring succeeds if actual is a string or stringer that contains the +// passed-in substring. Optional arguments can be provided to construct the substring +// via fmt.Sprintf(). func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher { return &matchers.ContainSubstringMatcher{ Substr: substr, @@ -185,9 +203,9 @@ func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher { } } -//HavePrefix succeeds if actual is a string or stringer that contains the -//passed-in string as a prefix. Optional arguments can be provided to construct -//via fmt.Sprintf(). +// HavePrefix succeeds if actual is a string or stringer that contains the +// passed-in string as a prefix. Optional arguments can be provided to construct +// via fmt.Sprintf(). func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher { return &matchers.HavePrefixMatcher{ Prefix: prefix, @@ -195,9 +213,9 @@ func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher { } } -//HaveSuffix succeeds if actual is a string or stringer that contains the -//passed-in string as a suffix. Optional arguments can be provided to construct -//via fmt.Sprintf(). +// HaveSuffix succeeds if actual is a string or stringer that contains the +// passed-in string as a suffix. Optional arguments can be provided to construct +// via fmt.Sprintf(). func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher { return &matchers.HaveSuffixMatcher{ Suffix: suffix, @@ -205,136 +223,177 @@ func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher { } } -//MatchJSON succeeds if actual is a string or stringer of JSON that matches -//the expected JSON. The JSONs are decoded and the resulting objects are compared via -//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter. +// MatchJSON succeeds if actual is a string or stringer of JSON that matches +// the expected JSON. 
The JSONs are decoded and the resulting objects are compared via +// reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter. func MatchJSON(json interface{}) types.GomegaMatcher { return &matchers.MatchJSONMatcher{ JSONToMatch: json, } } -//MatchXML succeeds if actual is a string or stringer of XML that matches -//the expected XML. The XMLs are decoded and the resulting objects are compared via -//reflect.DeepEqual so things like whitespaces shouldn't matter. +// MatchXML succeeds if actual is a string or stringer of XML that matches +// the expected XML. The XMLs are decoded and the resulting objects are compared via +// reflect.DeepEqual so things like whitespaces shouldn't matter. func MatchXML(xml interface{}) types.GomegaMatcher { return &matchers.MatchXMLMatcher{ XMLToMatch: xml, } } -//MatchYAML succeeds if actual is a string or stringer of YAML that matches -//the expected YAML. The YAML's are decoded and the resulting objects are compared via -//reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter. +// MatchYAML succeeds if actual is a string or stringer of YAML that matches +// the expected YAML. The YAML's are decoded and the resulting objects are compared via +// reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter. func MatchYAML(yaml interface{}) types.GomegaMatcher { return &matchers.MatchYAMLMatcher{ YAMLToMatch: yaml, } } -//BeEmpty succeeds if actual is empty. Actual must be of type string, array, map, chan, or slice. +// BeEmpty succeeds if actual is empty. Actual must be of type string, array, map, chan, or slice. func BeEmpty() types.GomegaMatcher { return &matchers.BeEmptyMatcher{} } -//HaveLen succeeds if actual has the passed-in length. Actual must be of type string, array, map, chan, or slice. +// HaveLen succeeds if actual has the passed-in length. Actual must be of type string, array, map, chan, or slice. func HaveLen(count int) types.GomegaMatcher { return &matchers.HaveLenMatcher{ Count: count, } } -//HaveCap succeeds if actual has the passed-in capacity. Actual must be of type array, chan, or slice. +// HaveCap succeeds if actual has the passed-in capacity. Actual must be of type array, chan, or slice. func HaveCap(count int) types.GomegaMatcher { return &matchers.HaveCapMatcher{ Count: count, } } -//BeZero succeeds if actual is the zero value for its type or if actual is nil. +// BeZero succeeds if actual is the zero value for its type or if actual is nil. func BeZero() types.GomegaMatcher { return &matchers.BeZeroMatcher{} } -//ContainElement succeeds if actual contains the passed in element. -//By default ContainElement() uses Equal() to perform the match, however a -//matcher can be passed in instead: -// Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar"))) +// ContainElement succeeds if actual contains the passed in element. By default +// ContainElement() uses Equal() to perform the match, however a matcher can be +// passed in instead: // -//Actual must be an array, slice or map. -//For maps, ContainElement searches through the map's values. -func ContainElement(element interface{}) types.GomegaMatcher { +// Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar"))) +// +// Actual must be an array, slice or map. For maps, ContainElement searches +// through the map's values. +// +// If you want to have a copy of the matching element(s) found you can pass a +// pointer to a variable of the appropriate type. 
If the variable isn't a slice
+// or map, then exactly one match will be expected and returned. If the variable
+// is a slice or map, then at least one match is expected and all matches will be
+// stored in the variable.
+//
+//	var findings []string
+//	Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubstring("Bar"), &findings))
+func ContainElement(element interface{}, result ...interface{}) types.GomegaMatcher {
	return &matchers.ContainElementMatcher{
		Element: element,
+		Result:  result,
	}
}

-//BeElementOf succeeds if actual is contained in the passed in elements.
-//BeElementOf() always uses Equal() to perform the match.
-//When the passed in elements are comprised of a single element that is either an Array or Slice, BeElementOf() behaves
-//as the reverse of ContainElement() that operates with Equal() to perform the match.
-//	Expect(2).Should(BeElementOf([]int{1, 2}))
-//	Expect(2).Should(BeElementOf([2]int{1, 2}))
-//Otherwise, BeElementOf() provides a syntactic sugar for Or(Equal(_), Equal(_), ...):
-//	Expect(2).Should(BeElementOf(1, 2))
+// BeElementOf succeeds if actual is contained in the passed in elements.
+// BeElementOf() always uses Equal() to perform the match.
+// When the passed in elements are comprised of a single element that is either an Array or Slice, BeElementOf() behaves
+// as the reverse of ContainElement() that operates with Equal() to perform the match.
+//
+//	Expect(2).Should(BeElementOf([]int{1, 2}))
+//	Expect(2).Should(BeElementOf([2]int{1, 2}))
+//
+// Otherwise, BeElementOf() provides a syntactic sugar for Or(Equal(_), Equal(_), ...):
+//
+//	Expect(2).Should(BeElementOf(1, 2))
//
-//Actual must be typed.
+// Actual must be typed.
func BeElementOf(elements ...interface{}) types.GomegaMatcher {
	return &matchers.BeElementOfMatcher{
		Elements: elements,
	}
}

-//ConsistOf succeeds if actual contains precisely the elements passed into the matcher. The ordering of the elements does not matter.
-//By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
+// BeKeyOf succeeds if actual is contained in the keys of the passed in map.
+// BeKeyOf() always uses Equal() to perform the match between actual and the map keys.
//
-//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo"))
-//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Bar"), "Foo"))
-//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Foo"), ContainSubstring("Foo")))
+//	Expect("foo").Should(BeKeyOf(map[string]bool{"foo": true, "bar": false}))
+func BeKeyOf(element interface{}) types.GomegaMatcher {
+	return &matchers.BeKeyOfMatcher{
+		Map: element,
+	}
+}
+
+// ConsistOf succeeds if actual contains precisely the elements passed into the matcher. The ordering of the elements does not matter.
+// By default ConsistOf() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples:
//
-//Actual must be an array, slice or map. For maps, ConsistOf matches against the map's values.
+//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf("FooBar", "Foo"))
+//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Bar"), "Foo"))
+//	Expect([]string{"Foo", "FooBar"}).Should(ConsistOf(ContainSubstring("Foo"), ContainSubstring("Foo")))
//
-//You typically pass variadic arguments to ConsistOf (as in the examples above).
+// Actual must be an array, slice or map. For maps, ConsistOf matches against the map's values.
//
-//You typically pass variadic arguments to ConsistOf (as in the examples above).
However, if you need to pass in a slice you can provided that it -//is the only element passed in to ConsistOf: +// Actual must be an array, slice or map. For maps, ConsistOf matches against the map's values. // -// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"})) +// You typically pass variadic arguments to ConsistOf (as in the examples above). However, if you need to pass in a slice you can, provided that it +// is the only element passed in to ConsistOf: // -//Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule. +// Expect([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"})) +// +// Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule. func ConsistOf(elements ...interface{}) types.GomegaMatcher { return &matchers.ConsistOfMatcher{ Elements: elements, } } -//ContainElements succeeds if actual contains the passed in elements. The ordering of the elements does not matter. -//By default ContainElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples: +// ContainElements succeeds if actual contains the passed in elements. The ordering of the elements does not matter. +// By default ContainElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples: // -// Expect([]string{"Foo", "FooBar"}).Should(ContainElements("FooBar")) -// Expect([]string{"Foo", "FooBar"}).Should(ContainElements(ContainSubstring("Bar"), "Foo")) +// Expect([]string{"Foo", "FooBar"}).Should(ContainElements("FooBar")) +// Expect([]string{"Foo", "FooBar"}).Should(ContainElements(ContainSubstring("Bar"), "Foo")) // -//Actual must be an array, slice or map. -//For maps, ContainElements searches through the map's values. +// Actual must be an array, slice or map. +// For maps, ContainElements searches through the map's values. func ContainElements(elements ...interface{}) types.GomegaMatcher { return &matchers.ContainElementsMatcher{ Elements: elements, } } -//HaveKey succeeds if actual is a map with the passed in key. -//By default HaveKey uses Equal() to perform the match, however a -//matcher can be passed in instead: -// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`))) +// HaveEach succeeds if actual solely contains elements that match the passed in element. +// Please note that if actual is empty, HaveEach always will fail. +// By default HaveEach() uses Equal() to perform the match, however a +// matcher can be passed in instead: +// +// Expect([]string{"Foo", "FooBar"}).Should(HaveEach(ContainSubstring("Foo"))) +// +// Actual must be an array, slice or map. +// For maps, HaveEach searches through the map's values. +func HaveEach(element interface{}) types.GomegaMatcher { + return &matchers.HaveEachMatcher{ + Element: element, + } +} + +// HaveKey succeeds if actual is a map with the passed in key.
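+// For instance (an illustrative sketch with assumed values, using the default Equal() matching): Expect(map[string]int{"Foo": 1}).Should(HaveKey("Foo")).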
+// By default HaveKey uses Equal() to perform the match, however a +// matcher can be passed in instead: +// +// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`))) func HaveKey(key interface{}) types.GomegaMatcher { return &matchers.HaveKeyMatcher{ Key: key, } } -//HaveKeyWithValue succeeds if actual is a map with the passed in key and value. -//By default HaveKeyWithValue uses Equal() to perform the match, however a -//matcher can be passed in instead: -// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar")) -// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar")) +// HaveKeyWithValue succeeds if actual is a map with the passed in key and value. +// By default HaveKeyWithValue uses Equal() to perform the match, however a +// matcher can be passed in instead: +// +// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar")) +// Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar")) func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher { return &matchers.HaveKeyWithValueMatcher{ Key: key, @@ -342,27 +401,27 @@ func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher { } } -//HaveField succeeds if actual is a struct and the value at the passed in field -//matches the passed in matcher. By default HaveField used Equal() to perform the match, -//however a matcher can be passed in in stead. -// -//The field must be a string that resolves to the name of a field in the struct. Structs can be traversed -//using the '.' delimiter. If the field ends with '()' a method named field is assumed to exist on the struct and is invoked. -//Such methods must take no arguments and return a single value: -// -// type Book struct { -// Title string -// Author Person -// } -// type Person struct { -// FirstName string -// LastName string -// DOB time.Time -// } -// Expect(book).To(HaveField("Title", "Les Miserables")) -// Expect(book).To(HaveField("Title", ContainSubstring("Les")) -// Expect(book).To(HaveField("Author.FirstName", Equal("Victor")) -// Expect(book).To(HaveField("Author.DOB.Year()", BeNumerically("<", 1900)) +// HaveField succeeds if actual is a struct and the value at the passed in field +// matches the passed in matcher. By default HaveField uses Equal() to perform the match, +// however a matcher can be passed in instead. +// +// The field must be a string that resolves to the name of a field in the struct. Structs can be traversed +// using the '.' delimiter. If the field ends with '()' a method named field is assumed to exist on the struct and is invoked.
+// Such methods must take no arguments and return a single value: +// +// type Book struct { +// Title string +// Author Person +// } +// type Person struct { +// FirstName string +// LastName string +// DOB time.Time +// } +// Expect(book).To(HaveField("Title", "Les Miserables")) +// Expect(book).To(HaveField("Title", ContainSubstring("Les"))) +// Expect(book).To(HaveField("Author.FirstName", Equal("Victor"))) +// Expect(book).To(HaveField("Author.DOB.Year()", BeNumerically("<", 1900))) func HaveField(field string, expected interface{}) types.GomegaMatcher { return &matchers.HaveFieldMatcher{ Field: field, @@ -370,6 +429,19 @@ func HaveField(field string, expected interface{}) types.GomegaMatcher { } } +// HaveExistingField succeeds if actual is a struct and the specified field +// exists. +// +// HaveExistingField can be combined with HaveField in order to cover use cases +// with optional fields. HaveField alone would trigger an error in such situations. +// +// Expect(MrHarmless).NotTo(And(HaveExistingField("Title"), HaveField("Title", "Supervillain"))) +func HaveExistingField(field string) types.GomegaMatcher { + return &matchers.HaveExistingFieldMatcher{ + Field: field, + } +} + // HaveValue applies the given matcher to the value of actual, optionally and // repeatedly dereferencing pointers or taking the concrete value of interfaces. // Thus, the matcher will always be applied to non-pointer and non-interface @@ -381,26 +453,27 @@ func HaveField(field string, expected interface{}) types.GomegaMatcher { // be a pointer (as gstruct.PointTo does) but instead also accepts non-pointer // and even interface values. // -// actual := 42 -// Expect(actual).To(HaveValue(42)) -// Expect(&actual).To(HaveValue(42)) +// actual := 42 +// Expect(actual).To(HaveValue(42)) +// Expect(&actual).To(HaveValue(42)) func HaveValue(matcher types.GomegaMatcher) types.GomegaMatcher { return &matchers.HaveValueMatcher{ Matcher: matcher, } } -//BeNumerically performs numerical assertions in a type-agnostic way. -//Actual and expected should be numbers, though the specific type of -//number is irrelevant (float32, float64, uint8, etc...). +// BeNumerically performs numerical assertions in a type-agnostic way. +// Actual and expected should be numbers, though the specific type of +// number is irrelevant (float32, float64, uint8, etc...). +// +// There are six, self-explanatory, supported comparators: // -//There are six, self-explanatory, supported comparators: -// Expect(1.0).Should(BeNumerically("==", 1)) -// Expect(1.0).Should(BeNumerically("~", 0.999, 0.01)) -// Expect(1.0).Should(BeNumerically(">", 0.9)) -// Expect(1.0).Should(BeNumerically(">=", 1.0)) -// Expect(1.0).Should(BeNumerically("<", 3)) -// Expect(1.0).Should(BeNumerically("<=", 1.0)) +// Expect(1.0).Should(BeNumerically("==", 1)) +// Expect(1.0).Should(BeNumerically("~", 0.999, 0.01)) +// Expect(1.0).Should(BeNumerically(">", 0.9)) +// Expect(1.0).Should(BeNumerically(">=", 1.0)) +// Expect(1.0).Should(BeNumerically("<", 3)) +// Expect(1.0).Should(BeNumerically("<=", 1.0)) func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatcher { return &matchers.BeNumericallyMatcher{ Comparator: comparator, @@ -408,10 +481,11 @@ func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatc } } -//BeTemporally compares time.Time's like BeNumerically -//Actual and expected must be time.Time.
The comparators are the same as for BeNumerically -// Expect(time.Now()).Should(BeTemporally(">", time.Time{})) -// Expect(time.Now()).Should(BeTemporally("~", time.Now(), time.Second)) +// BeTemporally compares time.Time's like BeNumerically +// Actual and expected must be time.Time. The comparators are the same as for BeNumerically +// +// Expect(time.Now()).Should(BeTemporally(">", time.Time{})) +// Expect(time.Now()).Should(BeTemporally("~", time.Now(), time.Second)) func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Duration) types.GomegaMatcher { return &matchers.BeTemporallyMatcher{ Comparator: comparator, @@ -420,58 +494,61 @@ func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Dura } } -//BeAssignableToTypeOf succeeds if actual is assignable to the type of expected. -//It will return an error when one of the values is nil. -// Expect(0).Should(BeAssignableToTypeOf(0)) // Same values -// Expect(5).Should(BeAssignableToTypeOf(-1)) // different values same type -// Expect("foo").Should(BeAssignableToTypeOf("bar")) // different values same type -// Expect(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{})) +// BeAssignableToTypeOf succeeds if actual is assignable to the type of expected. +// It will return an error when one of the values is nil. +// +// Expect(0).Should(BeAssignableToTypeOf(0)) // Same values +// Expect(5).Should(BeAssignableToTypeOf(-1)) // different values same type +// Expect("foo").Should(BeAssignableToTypeOf("bar")) // different values same type +// Expect(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{})) func BeAssignableToTypeOf(expected interface{}) types.GomegaMatcher { return &matchers.AssignableToTypeOfMatcher{ Expected: expected, } } -//Panic succeeds if actual is a function that, when invoked, panics. -//Actual must be a function that takes no arguments and returns no results. +// Panic succeeds if actual is a function that, when invoked, panics. +// Actual must be a function that takes no arguments and returns no results. func Panic() types.GomegaMatcher { return &matchers.PanicMatcher{} } -//PanicWith succeeds if actual is a function that, when invoked, panics with a specific value. -//Actual must be a function that takes no arguments and returns no results. +// PanicWith succeeds if actual is a function that, when invoked, panics with a specific value. +// Actual must be a function that takes no arguments and returns no results. +// +// By default PanicWith uses Equal() to perform the match, however a +// matcher can be passed in instead: // -//By default PanicWith uses Equal() to perform the match, however a -//matcher can be passed in instead: -// Expect(fn).Should(PanicWith(MatchRegexp(`.+Foo$`))) +// Expect(fn).Should(PanicWith(MatchRegexp(`.+Foo$`))) func PanicWith(expected interface{}) types.GomegaMatcher { return &matchers.PanicMatcher{Expected: expected} } -//BeAnExistingFile succeeds if a file exists. -//Actual must be a string representing the abs path to the file being checked. +// BeAnExistingFile succeeds if a file exists. +// Actual must be a string representing the abs path to the file being checked. func BeAnExistingFile() types.GomegaMatcher { return &matchers.BeAnExistingFileMatcher{} } -//BeARegularFile succeeds if a file exists and is a regular file. -//Actual must be a string representing the abs path to the file being checked. +// BeARegularFile succeeds if a file exists and is a regular file. 
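+// (Here a regular file is one whose os.FileMode reports IsRegular(), so directories, symlinks, and device files, for instance, do not match.)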
+// Actual must be a string representing the abs path to the file being checked. func BeARegularFile() types.GomegaMatcher { return &matchers.BeARegularFileMatcher{} } -//BeADirectory succeeds if a file exists and is a directory. -//Actual must be a string representing the abs path to the file being checked. +// BeADirectory succeeds if a file exists and is a directory. +// Actual must be a string representing the abs path to the file being checked. func BeADirectory() types.GomegaMatcher { return &matchers.BeADirectoryMatcher{} } -//HaveHTTPStatus succeeds if the Status or StatusCode field of an HTTP response matches. -//Actual must be either a *http.Response or *httptest.ResponseRecorder. -//Expected must be either an int or a string. -// Expect(resp).Should(HaveHTTPStatus(http.StatusOK)) // asserts that resp.StatusCode == 200 -// Expect(resp).Should(HaveHTTPStatus("404 Not Found")) // asserts that resp.Status == "404 Not Found" -// Expect(resp).Should(HaveHTTPStatus(http.StatusOK, http.StatusNoContent)) // asserts that resp.StatusCode == 200 || resp.StatusCode == 204 +// HaveHTTPStatus succeeds if the Status or StatusCode field of an HTTP response matches. +// Actual must be either a *http.Response or *httptest.ResponseRecorder. +// Expected must be either an int or a string. +// +// Expect(resp).Should(HaveHTTPStatus(http.StatusOK)) // asserts that resp.StatusCode == 200 +// Expect(resp).Should(HaveHTTPStatus("404 Not Found")) // asserts that resp.Status == "404 Not Found" +// Expect(resp).Should(HaveHTTPStatus(http.StatusOK, http.StatusNoContent)) // asserts that resp.StatusCode == 200 || resp.StatusCode == 204 func HaveHTTPStatus(expected ...interface{}) types.GomegaMatcher { return &matchers.HaveHTTPStatusMatcher{Expected: expected} } @@ -494,63 +571,70 @@ func HaveHTTPBody(expected interface{}) types.GomegaMatcher { return &matchers.HaveHTTPBodyMatcher{Expected: expected} } -//And succeeds only if all of the given matchers succeed. -//The matchers are tried in order, and will fail-fast if one doesn't succeed. -// Expect("hi").To(And(HaveLen(2), Equal("hi")) +// And succeeds only if all of the given matchers succeed. +// The matchers are tried in order, and will fail-fast if one doesn't succeed. +// +// Expect("hi").To(And(HaveLen(2), Equal("hi"))) // -//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. +// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. func And(ms ...types.GomegaMatcher) types.GomegaMatcher { return &matchers.AndMatcher{Matchers: ms} } -//SatisfyAll is an alias for And(). -// Expect("hi").Should(SatisfyAll(HaveLen(2), Equal("hi"))) +// SatisfyAll is an alias for And(). +// +// Expect("hi").Should(SatisfyAll(HaveLen(2), Equal("hi"))) func SatisfyAll(matchers ...types.GomegaMatcher) types.GomegaMatcher { return And(matchers...) } -//Or succeeds if any of the given matchers succeed. -//The matchers are tried in order and will return immediately upon the first successful match. -// Expect("hi").To(Or(HaveLen(3), HaveLen(2)) +// Or succeeds if any of the given matchers succeed. +// The matchers are tried in order and will return immediately upon the first successful match. +// +// Expect("hi").To(Or(HaveLen(3), HaveLen(2))) // -//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. +// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions.
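+// +// A further illustrative composition (values assumed for illustration): +// +// Expect(4).To(Or(BeNumerically("<", 2), BeNumerically(">", 3)))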
func Or(ms ...types.GomegaMatcher) types.GomegaMatcher { return &matchers.OrMatcher{Matchers: ms} } -//SatisfyAny is an alias for Or(). -// Expect("hi").SatisfyAny(Or(HaveLen(3), HaveLen(2)) +// SatisfyAny is an alias for Or(). +// +// Expect("hi").Should(SatisfyAny(HaveLen(3), HaveLen(2))) func SatisfyAny(matchers ...types.GomegaMatcher) types.GomegaMatcher { return Or(matchers...) } -//Not negates the given matcher; it succeeds if the given matcher fails. -// Expect(1).To(Not(Equal(2)) +// Not negates the given matcher; it succeeds if the given matcher fails. // -//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. +// Expect(1).To(Not(Equal(2))) +// +// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. func Not(matcher types.GomegaMatcher) types.GomegaMatcher { return &matchers.NotMatcher{Matcher: matcher} } -//WithTransform applies the `transform` to the actual value and matches it against `matcher`. -//The given transform must be either a function of one parameter that returns one value or a +// WithTransform applies the `transform` to the actual value and matches it against `matcher`. +// The given transform must be either a function of one parameter that returns one value or a // function of one parameter that returns two values, where the second value must be of the // error type. -// var plus1 = func(i int) int { return i + 1 } -// Expect(1).To(WithTransform(plus1, Equal(2)) // -// var failingplus1 = func(i int) (int, error) { return 42, "this does not compute" } -// Expect(1).To(WithTransform(failingplus1, Equal(2))) +// var plus1 = func(i int) int { return i + 1 } +// Expect(1).To(WithTransform(plus1, Equal(2))) +// +// var failingplus1 = func(i int) (int, error) { return 42, fmt.Errorf("this does not compute") } +// Expect(1).To(WithTransform(failingplus1, Equal(2))) // -//And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. +// And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher { return matchers.NewWithTransformMatcher(transform, matcher) } -//Satisfy matches the actual value against the `predicate` function. -//The given predicate must be a function of one paramter that returns bool. +// Satisfy matches the actual value against the `predicate` function. +// The given predicate must be a function of one parameter that returns bool.
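+// A predicate over any concrete type works; the isEven sketch below, for instance, takes an int.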
+// +// var isEven = func(i int) bool { return i%2 == 0 } +// Expect(2).To(Satisfy(isEven)) func Satisfy(predicate interface{}) types.GomegaMatcher { return matchers.NewSatisfyMatcher(predicate) } diff --git a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go new file mode 100644 index 000000000..8ab4bb919 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go @@ -0,0 +1,49 @@ +package matchers + +import ( + "bytes" + "fmt" + + "github.com/google/go-cmp/cmp" + "github.com/onsi/gomega/format" +) + +type BeComparableToMatcher struct { + Expected interface{} + Options cmp.Options +} + +func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, matchErr error) { + if actual == nil && matcher.Expected == nil { + return false, fmt.Errorf("Refusing to compare <nil> to <nil>.\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") + } + // Shortcut for byte slices. + // Comparing long byte slices with reflect.DeepEqual is very slow, + // so use bytes.Equal if actual and expected are both byte slices. + if actualByteSlice, ok := actual.([]byte); ok { + if expectedByteSlice, ok := matcher.Expected.([]byte); ok { + return bytes.Equal(actualByteSlice, expectedByteSlice), nil + } + } + + defer func() { + if r := recover(); r != nil { + success = false + if err, ok := r.(error); ok { + matchErr = err + } else if errMsg, ok := r.(string); ok { + matchErr = fmt.Errorf(errMsg) + } + } + }() + + return cmp.Equal(actual, matcher.Expected, matcher.Options...), nil +} + +func (matcher *BeComparableToMatcher) FailureMessage(actual interface{}) (message string) { + return cmp.Diff(matcher.Expected, actual, matcher.Options) +} + +func (matcher *BeComparableToMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to equal", matcher.Expected) +} diff --git a/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go new file mode 100644 index 000000000..449a291ef --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go @@ -0,0 +1,45 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type BeKeyOfMatcher struct { + Map interface{} +} + +func (matcher *BeKeyOfMatcher) Match(actual interface{}) (success bool, err error) { + if !isMap(matcher.Map) { + return false, fmt.Errorf("BeKeyOf matcher needs expected to be a map type") + } + + if reflect.TypeOf(actual) == nil { + return false, fmt.Errorf("BeKeyOf matcher expects actual to be typed") + } + + var lastError error + for _, key := range reflect.ValueOf(matcher.Map).MapKeys() { + matcher := &EqualMatcher{Expected: key.Interface()} + success, err := matcher.Match(actual) + if err != nil { + lastError = err + continue + } + if success { + return true, nil + } + } + + return false, lastError +} + +func (matcher *BeKeyOfMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to be a key of", presentable(valuesOf(matcher.Map))) +} + +func (matcher *BeKeyOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to be a key of", presentable(valuesOf(matcher.Map))) +} diff --git a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go
index 8d6c44c7a..3d45c9ebc 100644 --- a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go @@ -3,6 +3,7 @@ package matchers import ( + "errors" "fmt" "reflect" @@ -11,6 +12,7 @@ import ( type ContainElementMatcher struct { Element interface{} + Result []interface{} } func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) { @@ -18,6 +20,49 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e return false, fmt.Errorf("ContainElement matcher expects an array/slice/map. Got:\n%s", format.Object(actual, 1)) } + var actualT reflect.Type + var result reflect.Value + switch l := len(matcher.Result); { + case l > 1: + return false, errors.New("ContainElement matcher expects at most a single optional pointer to store its findings at") + case l == 1: + if reflect.ValueOf(matcher.Result[0]).Kind() != reflect.Ptr { + return false, fmt.Errorf("ContainElement matcher expects a non-nil pointer to store its findings at. Got\n%s", + format.Object(matcher.Result[0], 1)) + } + actualT = reflect.TypeOf(actual) + resultReference := matcher.Result[0] + result = reflect.ValueOf(resultReference).Elem() // what ResultReference points to, to stash away our findings + switch result.Kind() { + case reflect.Array: + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + reflect.SliceOf(actualT.Elem()).String(), result.Type().String()) + case reflect.Slice: + if !isArrayOrSlice(actual) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + reflect.MapOf(actualT.Key(), actualT.Elem()).String(), result.Type().String()) + } + if !actualT.Elem().AssignableTo(result.Type().Elem()) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + actualT.String(), result.Type().String()) + } + case reflect.Map: + if !isMap(actual) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + actualT.String(), result.Type().String()) + } + if !actualT.AssignableTo(result.Type()) { + return false, fmt.Errorf("ContainElement cannot return findings. Need *%s, got *%s", + actualT.String(), result.Type().String()) + } + default: + if !actualT.Elem().AssignableTo(result.Type()) { + return false, fmt.Errorf("ContainElement cannot return findings. 
Need *%s, got *%s", + actualT.Elem().String(), result.Type().String()) + } + } + } + elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher) if !elementIsMatcher { elemMatcher = &EqualMatcher{Expected: matcher.Element} @@ -25,30 +70,99 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e value := reflect.ValueOf(actual) var valueAt func(int) interface{} + + var getFindings func() reflect.Value + var foundAt func(int) + if isMap(actual) { keys := value.MapKeys() valueAt = func(i int) interface{} { return value.MapIndex(keys[i]).Interface() } + if result.Kind() != reflect.Invalid { + fm := reflect.MakeMap(actualT) + getFindings = func() reflect.Value { + return fm + } + foundAt = func(i int) { + fm.SetMapIndex(keys[i], value.MapIndex(keys[i])) + } + } } else { valueAt = func(i int) interface{} { return value.Index(i).Interface() } + if result.Kind() != reflect.Invalid { + var f reflect.Value + if result.Kind() == reflect.Slice { + f = reflect.MakeSlice(result.Type(), 0, 0) + } else { + f = reflect.MakeSlice(reflect.SliceOf(result.Type()), 0, 0) + } + getFindings = func() reflect.Value { + return f + } + foundAt = func(i int) { + f = reflect.Append(f, value.Index(i)) + } + } } var lastError error for i := 0; i < value.Len(); i++ { - success, err := elemMatcher.Match(valueAt(i)) + elem := valueAt(i) + success, err := elemMatcher.Match(elem) if err != nil { lastError = err continue } if success { - return true, nil + if result.Kind() == reflect.Invalid { + return true, nil + } + foundAt(i) } } - return false, lastError + // when the expectation isn't interested in the findings except for success + // or non-success, then we're done here and return the last matcher error + // seen, if any, as well as non-success. + if result.Kind() == reflect.Invalid { + return false, lastError + } + + // pick up any findings the test is interested in as it specified a non-nil + // result reference. However, the expection always is that there are at + // least one or multiple findings. So, if a result is expected, but we had + // no findings, then this is an error. + findings := getFindings() + if findings.Len() == 0 { + return false, lastError + } + + // there's just a single finding and the result is neither a slice nor a map + // (so it's a scalar): pick the one and only finding and return it in the + // place the reference points to. + if findings.Len() == 1 && !isArrayOrSlice(result.Interface()) && !isMap(result.Interface()) { + if isMap(actual) { + miter := findings.MapRange() + miter.Next() + result.Set(miter.Value()) + } else { + result.Set(findings.Index(0)) + } + return true, nil + } + + // at least one or even multiple findings and a the result references a + // slice or a map, so all we need to do is to store our findings where the + // reference points to. + if !findings.Type().AssignableTo(result.Type()) { + return false, fmt.Errorf("ContainElement cannot return multiple findings. 
Need *%s, got *%s", + findings.Type().String(), result.Type().String()) + } + result.Set(findings) + return true, nil } func (matcher *ContainElementMatcher) FailureMessage(actual interface{}) (message string) { diff --git a/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go new file mode 100644 index 000000000..025b6e1ac --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go @@ -0,0 +1,65 @@ +package matchers + +import ( + "fmt" + "reflect" + + "github.com/onsi/gomega/format" +) + +type HaveEachMatcher struct { + Element interface{} +} + +func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err error) { + if !isArrayOrSlice(actual) && !isMap(actual) { + return false, fmt.Errorf("HaveEach matcher expects an array/slice/map. Got:\n%s", + format.Object(actual, 1)) + } + + elemMatcher, elementIsMatcher := matcher.Element.(omegaMatcher) + if !elementIsMatcher { + elemMatcher = &EqualMatcher{Expected: matcher.Element} + } + + value := reflect.ValueOf(actual) + if value.Len() == 0 { + return false, fmt.Errorf("HaveEach matcher expects a non-empty array/slice/map. Got:\n%s", + format.Object(actual, 1)) + } + + var valueAt func(int) interface{} + if isMap(actual) { + keys := value.MapKeys() + valueAt = func(i int) interface{} { + return value.MapIndex(keys[i]).Interface() + } + } else { + valueAt = func(i int) interface{} { + return value.Index(i).Interface() + } + } + + // if there are no elements, then HaveEach will match. + for i := 0; i < value.Len(); i++ { + success, err := elemMatcher.Match(valueAt(i)) + if err != nil { + return false, err + } + if !success { + return false, nil + } + } + + return true, nil +} + +// FailureMessage returns a suitable failure message. +func (matcher *HaveEachMatcher) FailureMessage(actual interface{}) (message string) { + return format.Message(actual, "to contain element matching", matcher.Element) +} + +// NegatedFailureMessage returns a suitable negated failure message. +func (matcher *HaveEachMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to contain element matching", matcher.Element) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go new file mode 100644 index 000000000..b57018745 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go @@ -0,0 +1,36 @@ +package matchers + +import ( + "errors" + "fmt" + + "github.com/onsi/gomega/format" +) + +type HaveExistingFieldMatcher struct { + Field string +} + +func (matcher *HaveExistingFieldMatcher) Match(actual interface{}) (success bool, err error) { + // we don't care about the field's actual value, just about any error in + // trying to find the field (or method). + _, err = extractField(actual, matcher.Field, "HaveExistingField") + if err == nil { + return true, nil + } + var mferr missingFieldError + if errors.As(err, &mferr) { + // missing field errors aren't errors in this context, but instead + // unsuccessful matches. 
+ return false, nil + } + return false, err +} + +func (matcher *HaveExistingFieldMatcher) FailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\nto have field '%s'", format.Object(actual, 1), matcher.Field) +} + +func (matcher *HaveExistingFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return fmt.Sprintf("Expected\n%s\nnot to have field '%s'", format.Object(actual, 1), matcher.Field) +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_field.go b/vendor/github.com/onsi/gomega/matchers/have_field.go index e1fe934d5..6989f78c4 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_field.go +++ b/vendor/github.com/onsi/gomega/matchers/have_field.go @@ -8,7 +8,16 @@ import ( "github.com/onsi/gomega/format" ) -func extractField(actual interface{}, field string) (interface{}, error) { +// missingFieldError represents a missing field extraction error that +// HaveExistingFieldMatcher can ignore, as opposed to other, severe field +// extraction errors, such as nil pointers, et cetera. +type missingFieldError string + +func (e missingFieldError) Error() string { + return string(e) +} + +func extractField(actual interface{}, field string, matchername string) (interface{}, error) { fields := strings.SplitN(field, ".", 2) actualValue := reflect.ValueOf(actual) @@ -16,36 +25,39 @@ func extractField(actual interface{}, field string) (interface{}, error) { actualValue = actualValue.Elem() } if actualValue == (reflect.Value{}) { - return nil, fmt.Errorf("HaveField encountered nil while dereferencing a pointer of type %T.", actual) + return nil, fmt.Errorf("%s encountered nil while dereferencing a pointer of type %T.", matchername, actual) } if actualValue.Kind() != reflect.Struct { - return nil, fmt.Errorf("HaveField encountered:\n%s\nWhich is not a struct.", format.Object(actual, 1)) + return nil, fmt.Errorf("%s encountered:\n%s\nWhich is not a struct.", matchername, format.Object(actual, 1)) } var extractedValue reflect.Value if strings.HasSuffix(fields[0], "()") { extractedValue = actualValue.MethodByName(strings.TrimSuffix(fields[0], "()")) + if extractedValue == (reflect.Value{}) && actualValue.CanAddr() { + extractedValue = actualValue.Addr().MethodByName(strings.TrimSuffix(fields[0], "()")) + } + if extractedValue == (reflect.Value{}) { - return nil, fmt.Errorf("HaveField could not find method named '%s' in struct of type %T.", fields[0], actual) + return nil, missingFieldError(fmt.Sprintf("%s could not find method named '%s' in struct of type %T.", matchername, fields[0], actual)) } t := extractedValue.Type() if t.NumIn() != 0 || t.NumOut() != 1 { - return nil, fmt.Errorf("HaveField found an invalid method named '%s' in struct of type %T.\nMethods must take no arguments and return exactly one value.", fields[0], actual) + return nil, fmt.Errorf("%s found an invalid method named '%s' in struct of type %T.\nMethods must take no arguments and return exactly one value.", matchername, fields[0], actual) } extractedValue = extractedValue.Call([]reflect.Value{})[0] } else { extractedValue = actualValue.FieldByName(fields[0]) if extractedValue == (reflect.Value{}) { - return nil, fmt.Errorf("HaveField could not find field named '%s' in struct:\n%s", fields[0], format.Object(actual, 1)) + return nil, missingFieldError(fmt.Sprintf("%s could not find field named '%s' in struct:\n%s", matchername, fields[0], format.Object(actual, 1))) + } } if len(fields) == 1 { return extractedValue.Interface(), nil } else { - return
extractField(extractedValue.Interface(), fields[1]) + return extractField(extractedValue.Interface(), fields[1], matchername) } } @@ -58,7 +70,7 @@ type HaveFieldMatcher struct { } func (matcher *HaveFieldMatcher) Match(actual interface{}) (success bool, err error) { - matcher.extractedField, err = extractField(actual, matcher.Field) + matcher.extractedField, err = extractField(actual, matcher.Field, "HaveField") if err != nil { return false, err } diff --git a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go index 0c83c2b63..2cb6b47db 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go @@ -5,7 +5,7 @@ import ( "strings" "github.com/onsi/gomega/format" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" ) type MatchYAMLMatcher struct { diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go index c315ef065..089505a4b 100644 --- a/vendor/github.com/onsi/gomega/types/types.go +++ b/vendor/github.com/onsi/gomega/types/types.go @@ -1,12 +1,13 @@ package types import ( + "context" "time" ) type GomegaFailHandler func(message string, callerSkip ...int) -//A simple *testing.T interface wrapper +// A simple *testing.T interface wrapper type GomegaTestingT interface { Helper() Fatalf(format string, args ...interface{}) @@ -18,11 +19,11 @@ type Gomega interface { Expect(actual interface{}, extra ...interface{}) Assertion ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion - Eventually(actual interface{}, intervals ...interface{}) AsyncAssertion - EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion + Eventually(args ...interface{}) AsyncAssertion + EventuallyWithOffset(offset int, args ...interface{}) AsyncAssertion - Consistently(actual interface{}, intervals ...interface{}) AsyncAssertion - ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) AsyncAssertion + Consistently(args ...interface{}) AsyncAssertion + ConsistentlyWithOffset(offset int, args ...interface{}) AsyncAssertion SetDefaultEventuallyTimeout(time.Duration) SetDefaultEventuallyPollingInterval(time.Duration) @@ -30,9 +31,9 @@ type Gomega interface { SetDefaultConsistentlyPollingInterval(time.Duration) } -//All Gomega matchers must implement the GomegaMatcher interface +// All Gomega matchers must implement the GomegaMatcher interface // -//For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding-your-own-matchers +// For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding-your-own-matchers type GomegaMatcher interface { Match(actual interface{}) (success bool, err error) FailureMessage(actual interface{}) (message string) @@ -70,6 +71,10 @@ type AsyncAssertion interface { WithOffset(offset int) AsyncAssertion WithTimeout(interval time.Duration) AsyncAssertion WithPolling(interval time.Duration) AsyncAssertion + Within(timeout time.Duration) AsyncAssertion + ProbeEvery(interval time.Duration) AsyncAssertion + WithContext(ctx context.Context) AsyncAssertion + WithArguments(argsToForward ...interface{}) AsyncAssertion } // Assertions are returned by Ω and Expect and enable assertions against Gomega matchers diff --git a/vendor/github.com/projectcalico/api/LICENSE b/vendor/github.com/projectcalico/api/LICENSE new file mode 100644 index 000000000..68c771a09 --- /dev/null +++ 
b/vendor/github.com/projectcalico/api/LICENSE @@ -0,0 +1,176 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/BUILD b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/BUILD new file mode 100644 index 000000000..e7d227e66 --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/BUILD @@ -0,0 +1,27 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "register.go", + "types.go", + "zz_generated.conversion.go", + "zz_generated.deepcopy.go", + ], + tags = ["automanaged"], + deps = [ + "k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "k8s.io/apimachinery/pkg/conversion:go_default_library", + "k8s.io/apimachinery/pkg/runtime:go_default_library", + "k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "k8s.io/sample-apiserver/pkg/apis/wardle:go_default_library", + ], +) diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/bgpconfig.go b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/bgpconfig.go new file mode 100644 index 000000000..70316efe8 --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/bgpconfig.go @@ -0,0 +1,160 @@ +// Copyright (c) 2020-2021 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/projectcalico/api/pkg/lib/numorstring" +) + +const ( + KindBGPConfiguration = "BGPConfiguration" + KindBGPConfigurationList = "BGPConfigurationList" +) + +type BindMode string + +const ( + BindModeNone BindMode = "None" + BindModeNodeIP BindMode = "NodeIP" +) + +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BGPConfigurationList is a list of BGPConfiguration resources. +type BGPConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []BGPConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type BGPConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Spec BGPConfigurationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// BGPConfigurationSpec contains the values of the BGP configuration. +type BGPConfigurationSpec struct { + // LogSeverityScreen is the log severity above which logs are sent to the stdout. [Default: INFO] + LogSeverityScreen string `json:"logSeverityScreen,omitempty" validate:"omitempty,logLevel" confignamev1:"loglevel"` + + // NodeToNodeMeshEnabled sets whether full node to node BGP mesh is enabled. 
[Default: true] + NodeToNodeMeshEnabled *bool `json:"nodeToNodeMeshEnabled,omitempty" validate:"omitempty" confignamev1:"node_mesh"` + + // ASNumber is the default AS number used by a node. [Default: 64512] + ASNumber *numorstring.ASNumber `json:"asNumber,omitempty" validate:"omitempty" confignamev1:"as_num"` + + // ServiceLoadBalancerIPs are the CIDR blocks for Kubernetes Service LoadBalancer IPs. + // Kubernetes Service status.LoadBalancer.Ingress IPs will only be advertised if they are within one of these blocks. + ServiceLoadBalancerIPs []ServiceLoadBalancerIPBlock `json:"serviceLoadBalancerIPs,omitempty" validate:"omitempty,dive" confignamev1:"svc_loadbalancer_ips"` + + // ServiceExternalIPs are the CIDR blocks for Kubernetes Service External IPs. + // Kubernetes Service ExternalIPs will only be advertised if they are within one of these blocks. + ServiceExternalIPs []ServiceExternalIPBlock `json:"serviceExternalIPs,omitempty" validate:"omitempty,dive" confignamev1:"svc_external_ips"` + + // ServiceClusterIPs are the CIDR blocks from which service cluster IPs are allocated. + // If specified, Calico will advertise these blocks, as well as any cluster IPs within them. + ServiceClusterIPs []ServiceClusterIPBlock `json:"serviceClusterIPs,omitempty" validate:"omitempty,dive" confignamev1:"svc_cluster_ips"` + + // Communities is a list of BGP community values and their arbitrary names for tagging routes. + Communities []Community `json:"communities,omitempty" validate:"omitempty,dive" confignamev1:"communities"` + + // PrefixAdvertisements contains per-prefix advertisement configuration. + PrefixAdvertisements []PrefixAdvertisement `json:"prefixAdvertisements,omitempty" validate:"omitempty,dive" confignamev1:"prefix_advertisements"` + + // ListenPort is the port where BGP protocol should listen. Defaults to 179 + // +kubebuilder:validation:Minimum:=1 + // +kubebuilder:validation:Maximum:=65535 + ListenPort uint16 `json:"listenPort,omitempty" validate:"omitempty,gt=0" confignamev1:"listen_port"` + + // Optional BGP password for full node-to-mesh peerings. + // This field can only be set on the default BGPConfiguration instance and requires that NodeMesh is enabled + // +optional + NodeMeshPassword *BGPPassword `json:"nodeMeshPassword,omitempty" validate:"omitempty" confignamev1:"node_mesh_password"` + + // Time to allow for software restart for node-to-mesh peerings. When specified, this is configured + // as the graceful restart timeout. When not specified, the BIRD default of 120s is used. + // This field can only be set on the default BGPConfiguration instance and requires that NodeMesh is enabled + // +optional + NodeMeshMaxRestartTime *metav1.Duration `json:"nodeMeshMaxRestartTime,omitempty" confignamev1:"node_mesh_restart_time"` + + // BindMode indicates whether to listen for BGP connections on all addresses (None) + // or only on the node's canonical IP address Node.Spec.BGP.IPvXAddress (NodeIP). + // Default behaviour is to listen for BGP connections on all addresses. + // +optional + BindMode *BindMode `json:"bindMode,omitempty"` + + // IgnoredInterfaces indicates the network interfaces that needs to be excluded when reading device routes. + // +optional + IgnoredInterfaces []string `json:"ignoredInterfaces,omitempty" validate:"omitempty,dive,ignoredInterface"` +} + +// ServiceLoadBalancerIPBlock represents a single allowed LoadBalancer IP CIDR block. 
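+// For example (illustrative CIDR only), a ServiceLoadBalancerIPBlock{CIDR: "10.96.0.0/12"} entry would permit advertising LoadBalancer ingress IPs within that block.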
+type ServiceLoadBalancerIPBlock struct { + CIDR string `json:"cidr,omitempty" validate:"omitempty,net"` +} + +// ServiceExternalIPBlock represents a single allowed External IP CIDR block. +type ServiceExternalIPBlock struct { + CIDR string `json:"cidr,omitempty" validate:"omitempty,net"` +} + +// ServiceClusterIPBlock represents a single allowed ClusterIP CIDR block. +type ServiceClusterIPBlock struct { + CIDR string `json:"cidr,omitempty" validate:"omitempty,net"` +} + +// Community contains standard or large community value and its name. +type Community struct { + // Name given to community value. + Name string `json:"name,omitempty" validate:"required,name"` + // Value must be of format `aa:nn` or `aa:nn:mm`. + // For standard community use `aa:nn` format, where `aa` and `nn` are 16 bit number. + // For large community use `aa:nn:mm` format, where `aa`, `nn` and `mm` are 32 bit number. + // Where, `aa` is an AS Number, `nn` and `mm` are per-AS identifier. + // +kubebuilder:validation:Pattern=`^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$` + Value string `json:"value,omitempty" validate:"required"` +} + +// PrefixAdvertisement configures advertisement properties for the specified CIDR. +type PrefixAdvertisement struct { + // CIDR for which properties should be advertised. + CIDR string `json:"cidr,omitempty" validate:"required,net"` + // Communities can be list of either community names already defined in `Specs.Communities` or community value of format `aa:nn` or `aa:nn:mm`. + // For standard community use `aa:nn` format, where `aa` and `nn` are 16 bit number. + // For large community use `aa:nn:mm` format, where `aa`, `nn` and `mm` are 32 bit number. + // Where,`aa` is an AS Number, `nn` and `mm` are per-AS identifier. + Communities []string `json:"communities,omitempty" validate:"required"` +} + +// New BGPConfiguration creates a new (zeroed) BGPConfiguration struct with the TypeMetadata +// initialized to the current version. +func NewBGPConfiguration() *BGPConfiguration { + return &BGPConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: KindBGPConfiguration, + APIVersion: GroupVersionCurrent, + }, + } +} diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/bgpfilter.go b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/bgpfilter.go new file mode 100644 index 000000000..ebd1510ad --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/bgpfilter.go @@ -0,0 +1,106 @@ +// Copyright (c) 2022 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + KindBGPFilter = "BGPFilter" + KindBGPFilterList = "BGPFilterList" +) + +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BGPFilterList is a list of BGPFilter resources. 
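+// (As with the other list kinds in this package, the individual filters are carried in Items.)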
+type BGPFilterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []BGPFilter `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type BGPFilter struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Spec BGPFilterSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// BGPFilterSpec contains the IPv4 and IPv6 filter rules of the BGP Filter. +type BGPFilterSpec struct { + // The ordered set of IPv4 BGPFilter rules acting on exporting routes to a peer. + ExportV4 []BGPFilterRuleV4 `json:"exportV4,omitempty" validate:"omitempty,dive"` + + // The ordered set of IPv4 BGPFilter rules acting on importing routes from a peer. + ImportV4 []BGPFilterRuleV4 `json:"importV4,omitempty" validate:"omitempty,dive"` + + // The ordered set of IPv6 BGPFilter rules acting on exporting routes to a peer. + ExportV6 []BGPFilterRuleV6 `json:"exportV6,omitempty" validate:"omitempty,dive"` + + // The ordered set of IPv6 BGPFilter rules acting on importing routes from a peer. + ImportV6 []BGPFilterRuleV6 `json:"importV6,omitempty" validate:"omitempty,dive"` +} + +// BGPFilterRuleV4 defines a BGP filter rule consisting a single IPv4 CIDR block and a filter action for this CIDR. +type BGPFilterRuleV4 struct { + CIDR string `json:"cidr" validate:"required,netv4"` + + MatchOperator BGPFilterMatchOperator `json:"matchOperator" validate:"required,matchOperator"` + + Action BGPFilterAction `json:"action" validate:"required,filterAction"` +} + +// BGPFilterRuleV6 defines a BGP filter rule consisting a single IPv6 CIDR block and a filter action for this CIDR. +type BGPFilterRuleV6 struct { + CIDR string `json:"cidr" validate:"required,netv6"` + + MatchOperator BGPFilterMatchOperator `json:"matchOperator" validate:"required,matchOperator"` + + Action BGPFilterAction `json:"action" validate:"required,filterAction"` +} + +type BGPFilterMatchOperator string + +const ( + Equal BGPFilterMatchOperator = "Equal" + NotEqual = "NotEqual" + In = "In" + NotIn = "NotIn" +) + +type BGPFilterAction string + +const ( + Accept BGPFilterAction = "Accept" + Reject = "Reject" +) + +// New BGPFilter creates a new (zeroed) BGPFilter struct with the TypeMetadata +// initialized to the current version. +func NewBGPFilter() *BGPFilter { + return &BGPFilter{ + TypeMeta: metav1.TypeMeta{ + Kind: KindBGPFilter, + APIVersion: GroupVersionCurrent, + }, + } +} diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/bgppeer.go b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/bgppeer.go new file mode 100644 index 000000000..0b75b0163 --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/bgppeer.go @@ -0,0 +1,141 @@ +// Copyright (c) 2017,2020-2021 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package v3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + k8sv1 "k8s.io/api/core/v1" + + "github.com/projectcalico/api/pkg/lib/numorstring" +) + +const ( + KindBGPPeer = "BGPPeer" + KindBGPPeerList = "BGPPeerList" +) + +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BGPPeerList is a list of BGPPeer resources. +type BGPPeerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []BGPPeer `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type BGPPeer struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Spec BGPPeerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// BGPPeerSpec contains the specification for a BGPPeer resource. +type BGPPeerSpec struct { + // The node name identifying the Calico node instance that is targeted by this peer. + // If this is not set, and no nodeSelector is specified, then this BGP peer selects all + // nodes in the cluster. + // +optional + Node string `json:"node,omitempty" validate:"omitempty,name"` + + // Selector for the nodes that should have this peering. When this is set, the Node + // field must be empty. + // +optional + NodeSelector string `json:"nodeSelector,omitempty" validate:"omitempty,selector"` + + // The IP address of the peer followed by an optional port number to peer with. + // If port number is given, format should be `[]:port` or `:` for IPv4. + // If optional port number is not set, and this peer IP and ASNumber belongs to a calico/node + // with ListenPort set in BGPConfiguration, then we use that port to peer. + // +optional + PeerIP string `json:"peerIP,omitempty" validate:"omitempty,IP:port"` + + // The AS Number of the peer. + // +optional + ASNumber numorstring.ASNumber `json:"asNumber,omitempty"` + + // Selector for the remote nodes to peer with. When this is set, the PeerIP and + // ASNumber fields must be empty. For each peering between the local node and + // selected remote nodes, we configure an IPv4 peering if both ends have + // NodeBGPSpec.IPv4Address specified, and an IPv6 peering if both ends have + // NodeBGPSpec.IPv6Address specified. The remote AS number comes from the remote + // node's NodeBGPSpec.ASNumber, or the global default if that is not set. + // +optional + PeerSelector string `json:"peerSelector,omitempty" validate:"omitempty,selector"` + + // Option to keep the original nexthop field when routes are sent to a BGP Peer. + // Setting "true" configures the selected BGP Peers node to use the "next hop keep;" + // instead of "next hop self;"(default) in the specific branch of the Node on "bird.cfg". + KeepOriginalNextHop bool `json:"keepOriginalNextHop,omitempty"` + + // Optional BGP password for the peerings generated by this BGPPeer resource. + Password *BGPPassword `json:"password,omitempty" validate:"omitempty"` + // Specifies whether and how to configure a source address for the peerings generated by + // this BGPPeer resource. Default value "UseNodeIP" means to configure the node IP as the + // source address. "None" means not to configure a source address. 
+ SourceAddress SourceAddress `json:"sourceAddress,omitempty" validate:"omitempty,sourceAddress"` + // Time to allow for software restart. When specified, this is configured as the graceful + // restart timeout. When not specified, the BIRD default of 120s is used. + MaxRestartTime *metav1.Duration `json:"maxRestartTime,omitempty"` + // Maximum number of local AS numbers that are allowed in the AS path for received routes. + // This removes BGP loop prevention and should only be used if absolutely necesssary. + // +optional + NumAllowedLocalASNumbers *int32 `json:"numAllowedLocalASNumbers,omitempty"` + // TTLSecurity enables the generalized TTL security mechanism (GTSM) which protects against spoofed packets by + // ignoring received packets with a smaller than expected TTL value. The provided value is the number of hops + // (edges) between the peers. + // +optional + TTLSecurity *uint8 `json:"ttlSecurity,omitempty"` + + // Add an exact, i.e. /32, static route toward peer IP in order to prevent route flapping. + // ReachableBy contains the address of the gateway which peer can be reached by. + // +optional + ReachableBy string `json:"reachableBy,omitempty" validate:"omitempty,reachableBy"` + + // The ordered set of BGPFilters applied on this BGP peer. + // +optional + Filters []string `json:"filters,omitempty" validate:"omitempty,dive,name"` +} + +type SourceAddress string + +const ( + SourceAddressUseNodeIP SourceAddress = "UseNodeIP" + SourceAddressNone = "None" +) + +// BGPPassword contains ways to specify a BGP password. +type BGPPassword struct { + // Selects a key of a secret in the node pod's namespace. + SecretKeyRef *k8sv1.SecretKeySelector `json:"secretKeyRef,omitempty"` +} + +// NewBGPPeer creates a new (zeroed) BGPPeer struct with the TypeMetadata initialised to the current +// version. +func NewBGPPeer() *BGPPeer { + return &BGPPeer{ + TypeMeta: metav1.TypeMeta{ + Kind: KindBGPPeer, + APIVersion: GroupVersionCurrent, + }, + } +} diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/blockaffinity.go b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/blockaffinity.go new file mode 100644 index 000000000..c90de6bbe --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/blockaffinity.go @@ -0,0 +1,94 @@ +// Copyright (c) 2022 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + KindBlockAffinity = "BlockAffinity" + KindBlockAffinityList = "BlockAffinityList" +) + +type BlockAffinityState string + +const ( + StateConfirmed BlockAffinityState = "confirmed" + StatePending BlockAffinityState = "pending" + StatePendingDeletion BlockAffinityState = "pendingDeletion" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BlockAffinity maintains a block affinity's state +type BlockAffinity struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ObjectMeta `json:"metadata,omitempty"` + // Specification of the BlockAffinity. + Spec BlockAffinitySpec `json:"spec,omitempty"` +} + +// BlockAffinitySpec contains the specification for a BlockAffinity resource. +type BlockAffinitySpec struct { + // The state of the block affinity with regard to any referenced IPAM blocks. + State BlockAffinityState `json:"state"` + + // The node that this block affinity is assigned to. + Node string `json:"node"` + + // The CIDR range this block affinity references. + CIDR string `json:"cidr"` + + // Deleted indicates whether or not this block affinity is disabled and is + // used as part of race-condition prevention. When set to true, clients + // should treat this block as if it does not exist. + Deleted bool `json:"deleted,omitempty"` +} + +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BlockAffinityList contains a list of BlockAffinity resources. +type BlockAffinityList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []BlockAffinity `json:"items"` +} + +// NewBlockAffinity creates a new (zeroed) BlockAffinity struct with the TypeMetadata initialised to the current +// version. +func NewBlockAffinity() *BlockAffinity { + return &BlockAffinity{ + TypeMeta: metav1.TypeMeta{ + Kind: KindBlockAffinity, + APIVersion: GroupVersionCurrent, + }, + } +} + +// NewBlockAffinityList creates a new (zeroed) BlockAffinityList struct with the TypeMetadata initialised to the current +// version. +func NewBlockAffinityList() *BlockAffinityList { + return &BlockAffinityList{ + TypeMeta: metav1.TypeMeta{ + Kind: KindBlockAffinityList, + APIVersion: GroupVersionCurrent, + }, + } +} diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/clusterinfo.go b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/clusterinfo.go new file mode 100644 index 000000000..c28d81b22 --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/clusterinfo.go @@ -0,0 +1,72 @@ +// Copyright (c) 2017, 2020-2021 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + KindClusterInformation = "ClusterInformation" + KindClusterInformationList = "ClusterInformationList" +) + +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterInformationList is a list of ClusterInformation objects. +type ClusterInformationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []ClusterInformation `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type ClusterInformation struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Spec ClusterInformationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// ClusterInformationSpec contains the values of describing the cluster. +type ClusterInformationSpec struct { + // ClusterGUID is the GUID of the cluster + ClusterGUID string `json:"clusterGUID,omitempty" validate:"omitempty"` + // ClusterType describes the type of the cluster + ClusterType string `json:"clusterType,omitempty" validate:"omitempty"` + // CalicoVersion is the version of Calico that the cluster is running + CalicoVersion string `json:"calicoVersion,omitempty" validate:"omitempty"` + // DatastoreReady is used during significant datastore migrations to signal to components + // such as Felix that it should wait before accessing the datastore. + DatastoreReady *bool `json:"datastoreReady,omitempty"` + // Variant declares which variant of Calico should be active. + Variant string `json:"variant,omitempty"` +} + +// New ClusterInformation creates a new (zeroed) ClusterInformation struct with the TypeMetadata +// initialized to the current version. +func NewClusterInformation() *ClusterInformation { + return &ClusterInformation{ + TypeMeta: metav1.TypeMeta{ + Kind: KindClusterInformation, + APIVersion: GroupVersionCurrent, + }, + } +} diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/constants.go b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/constants.go new file mode 100644 index 000000000..b596efe82 --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/constants.go @@ -0,0 +1,52 @@ +// Copyright (c) 2017-2021 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3 + +const ( + // API group details for the Calico v3 API. + Group = "projectcalico.org" + VersionCurrent = "v3" + GroupVersionCurrent = Group + "/" + VersionCurrent + + // AllNamepaces is used for client instantiation, either for when the namespace + // will be specified in the resource request, or for List or Watch queries across + // all namespaces. + AllNamespaces = "" + + // AllNames is used for List or Watch queries to wildcard the name. 
+ AllNames = "" + + // Label used to denote the Namespace. This is added to workload endpoints and network sets by Calico + // and may be used for label matches by Policy selectors. + LabelNamespace = "projectcalico.org/namespace" + + // Label used to denote the ServiceAccount. This is added to the workload endpoints by Calico + // and may be used for label matches by Policy selectors. + LabelServiceAccount = "projectcalico.org/serviceaccount" + + // Label used to denote the Orchestrator. This is added to the workload endpoints by an + // orchestrator. + LabelOrchestrator = "projectcalico.org/orchestrator" + + // Known orchestrators. Orchestrators are not limited to this list. + OrchestratorKubernetes = "k8s" + OrchestratorCNI = "cni" + OrchestratorDocker = "libnetwork" + OrchestratorOpenStack = "openstack" + + // Enum options for enable/disable fields + Enabled = "Enabled" + Disabled = "Disabled" +) diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/conversion.go b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/conversion.go new file mode 100644 index 000000000..c68f4e98f --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/conversion.go @@ -0,0 +1,183 @@ +// Copyright (c) 2019-2021 Tigera, Inc. All rights reserved. + +package v3 + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add non-generated conversion functions + err := scheme.AddFieldLabelConversionFunc(schema.GroupVersionKind{"projectcalico.org", "v3", "NetworkPolicy"}, + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", "metadata.namespace": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + + err = scheme.AddFieldLabelConversionFunc(schema.GroupVersionKind{"projectcalico.org", "v3", "GlobalNetworkPolicy"}, + func(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + + err = scheme.AddFieldLabelConversionFunc(schema.GroupVersionKind{"projectcalico.org", "v3", "GlobalNetworkSet"}, + func(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + + err = scheme.AddFieldLabelConversionFunc(schema.GroupVersionKind{"projectcalico.org", "v3", "NetworkSet"}, + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", "metadata.namespace": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + + err = scheme.AddFieldLabelConversionFunc(schema.GroupVersionKind{"projectcalico.org", "v3", "HostEndpoint"}, + func(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + + err = scheme.AddFieldLabelConversionFunc(schema.GroupVersionKind{"projectcalico.org", "v3", "IPPool"}, + func(label, value string) (string, string, error) { + 
switch label { + case "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + + err = scheme.AddFieldLabelConversionFunc(schema.GroupVersionKind{"projectcalico.org", "v3", "BGPConfiguration"}, + func(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + + err = scheme.AddFieldLabelConversionFunc(schema.GroupVersionKind{"projectcalico.org", "v3", "BGPPeer"}, + func(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + + err = scheme.AddFieldLabelConversionFunc(schema.GroupVersionKind{"projectcalico.org", "v3", "Profile"}, + func(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + + err = scheme.AddFieldLabelConversionFunc(schema.GroupVersionKind{"projectcalico.org", "v3", "FelixConfiguration"}, + func(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + + err = scheme.AddFieldLabelConversionFunc(schema.GroupVersionKind{"projectcalico.org", "v3", "KubeControllersConfiguration"}, + func(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + + err = scheme.AddFieldLabelConversionFunc(schema.GroupVersionKind{"projectcalico.org", "v3", "ClusterInformation"}, + func(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/doc.go b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/doc.go new file mode 100644 index 000000000..ddeef25ca --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/doc.go @@ -0,0 +1,9 @@ +// Copyright (c) 2021 Tigera, Inc. All rights reserved. + +// +k8s:deepcopy-gen=package,register +// +k8s:openapi-gen=true + +// Package v3 is the v3 version of the API. +// +groupName=projectcalico.org + +package v3 // import "github.com/projectcalico/api/pkg/apis/projectcalico/v3" diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/felixconfig.go b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/felixconfig.go new file mode 100644 index 000000000..20e0bc16e --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/felixconfig.go @@ -0,0 +1,557 @@ +// Copyright (c) 2017-2022 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/projectcalico/api/pkg/lib/numorstring" +) + +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// FelixConfigurationList contains a list of FelixConfiguration object. +type FelixConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []FelixConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type FelixConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Spec FelixConfigurationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +type IptablesBackend string + +const ( + KindFelixConfiguration = "FelixConfiguration" + KindFelixConfigurationList = "FelixConfigurationList" + IptablesBackendLegacy = "Legacy" + IptablesBackendNFTables = "NFT" + IptablesBackendAuto = "Auto" +) + +// +kubebuilder:validation:Enum=DoNothing;Enable;Disable +type AWSSrcDstCheckOption string + +const ( + AWSSrcDstCheckOptionDoNothing AWSSrcDstCheckOption = "DoNothing" + AWSSrcDstCheckOptionEnable = "Enable" + AWSSrcDstCheckOptionDisable = "Disable" +) + +// +kubebuilder:validation:Enum=Enabled;Disabled +type FloatingIPType string + +const ( + FloatingIPsEnabled FloatingIPType = "Enabled" + FloatingIPsDisabled FloatingIPType = "Disabled" +) + +// FelixConfigurationSpec contains the values of the Felix configuration. +type FelixConfigurationSpec struct { + // UseInternalDataplaneDriver, if true, Felix will use its internal dataplane programming logic. If false, it + // will launch an external dataplane driver and communicate with it over protobuf. + UseInternalDataplaneDriver *bool `json:"useInternalDataplaneDriver,omitempty"` + // DataplaneDriver filename of the external dataplane driver to use. Only used if UseInternalDataplaneDriver + // is set to false. + DataplaneDriver string `json:"dataplaneDriver,omitempty"` + + // DataplaneWatchdogTimeout is the readiness/liveness timeout used for Felix's (internal) dataplane driver. + // Increase this value if you experience spurious non-ready or non-live events when Felix is under heavy load. + // Decrease the value to get felix to report non-live or non-ready more quickly. [Default: 90s] + // + // Deprecated: replaced by the generic HealthTimeoutOverrides. + DataplaneWatchdogTimeout *metav1.Duration `json:"dataplaneWatchdogTimeout,omitempty" configv1timescale:"seconds"` + + // IPv6Support controls whether Felix enables support for IPv6 (if supported by the in-use dataplane). + IPv6Support *bool `json:"ipv6Support,omitempty" confignamev1:"Ipv6Support"` + + // RouteRefreshInterval is the period at which Felix re-checks the routes + // in the dataplane to ensure that no other process has accidentally broken Calico's rules. + // Set to 0 to disable route refresh. 
[Default: 90s] + RouteRefreshInterval *metav1.Duration `json:"routeRefreshInterval,omitempty" configv1timescale:"seconds"` + // InterfaceRefreshInterval is the period at which Felix rescans local interfaces to verify their state. + // The rescan can be disabled by setting the interval to 0. + InterfaceRefreshInterval *metav1.Duration `json:"interfaceRefreshInterval,omitempty" configv1timescale:"seconds"` + // IptablesRefreshInterval is the period at which Felix re-checks the IP sets + // in the dataplane to ensure that no other process has accidentally broken Calico's rules. + // Set to 0 to disable IP sets refresh. Note: the default for this value is lower than the + // other refresh intervals as a workaround for a Linux kernel bug that was fixed in kernel + // version 4.11. If you are using v4.11 or greater you may want to set this to, a higher value + // to reduce Felix CPU usage. [Default: 10s] + IptablesRefreshInterval *metav1.Duration `json:"iptablesRefreshInterval,omitempty" configv1timescale:"seconds"` + // IptablesPostWriteCheckInterval is the period after Felix has done a write + // to the dataplane that it schedules an extra read back in order to check the write was not + // clobbered by another process. This should only occur if another application on the system + // doesn't respect the iptables lock. [Default: 1s] + IptablesPostWriteCheckInterval *metav1.Duration `json:"iptablesPostWriteCheckInterval,omitempty" configv1timescale:"seconds" confignamev1:"IptablesPostWriteCheckIntervalSecs"` + // IptablesLockFilePath is the location of the iptables lock file. You may need to change this + // if the lock file is not in its standard location (for example if you have mapped it into Felix's + // container at a different path). [Default: /run/xtables.lock] + IptablesLockFilePath string `json:"iptablesLockFilePath,omitempty"` + // IptablesLockTimeout is the time that Felix will wait for the iptables lock, + // or 0, to disable. To use this feature, Felix must share the iptables lock file with all other + // processes that also take the lock. When running Felix inside a container, this requires the + // /run directory of the host to be mounted into the calico/node or calico/felix container. + // [Default: 0s disabled] + IptablesLockTimeout *metav1.Duration `json:"iptablesLockTimeout,omitempty" configv1timescale:"seconds" confignamev1:"IptablesLockTimeoutSecs"` + // IptablesLockProbeInterval is the time that Felix will wait between + // attempts to acquire the iptables lock if it is not available. Lower values make Felix more + // responsive when the lock is contended, but use more CPU. [Default: 50ms] + IptablesLockProbeInterval *metav1.Duration `json:"iptablesLockProbeInterval,omitempty" configv1timescale:"milliseconds" confignamev1:"IptablesLockProbeIntervalMillis"` + // FeatureDetectOverride is used to override feature detection based on auto-detected platform + // capabilities. Values are specified in a comma separated list with no spaces, example; + // "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". "true" or "false" will + // force the feature, empty or omitted values are auto-detected. + FeatureDetectOverride string `json:"featureDetectOverride,omitempty" validate:"omitempty,keyValueList"` + // FeatureGates is used to enable or disable tech-preview Calico features. + // Values are specified in a comma separated list with no spaces, example; + // "BPFConnectTimeLoadBalancingWorkaround=enabled,XyZ=false". 
This is + // used to enable features that are not fully production ready. + FeatureGates string `json:"featureGates,omitempty" validate:"omitempty,keyValueList"` + // IpsetsRefreshInterval is the period at which Felix re-checks all iptables + // state to ensure that no other process has accidentally broken Calico's rules. Set to 0 to + // disable iptables refresh. [Default: 90s] + IpsetsRefreshInterval *metav1.Duration `json:"ipsetsRefreshInterval,omitempty" configv1timescale:"seconds"` + MaxIpsetSize *int `json:"maxIpsetSize,omitempty"` + // IptablesBackend specifies which backend of iptables will be used. The default is Auto. + IptablesBackend *IptablesBackend `json:"iptablesBackend,omitempty" validate:"omitempty,iptablesBackend"` + + // XDPRefreshInterval is the period at which Felix re-checks all XDP state to ensure that no + // other process has accidentally broken Calico's BPF maps or attached programs. Set to 0 to + // disable XDP refresh. [Default: 90s] + XDPRefreshInterval *metav1.Duration `json:"xdpRefreshInterval,omitempty" configv1timescale:"seconds"` + + NetlinkTimeout *metav1.Duration `json:"netlinkTimeout,omitempty" configv1timescale:"seconds" confignamev1:"NetlinkTimeoutSecs"` + + // MetadataAddr is the IP address or domain name of the server that can answer VM queries for + // cloud-init metadata. In OpenStack, this corresponds to the machine running nova-api (or in + // Ubuntu, nova-api-metadata). A value of none (case insensitive) means that Felix should not + // set up any NAT rule for the metadata path. [Default: 127.0.0.1] + MetadataAddr string `json:"metadataAddr,omitempty"` + // MetadataPort is the port of the metadata server. This, combined with global.MetadataAddr (if + // not 'None'), is used to set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort. + // In most cases this should not need to be changed [Default: 8775]. + MetadataPort *int `json:"metadataPort,omitempty"` + + // OpenstackRegion is the name of the region that a particular Felix belongs to. In a multi-region + // Calico/OpenStack deployment, this must be configured somehow for each Felix (here in the datamodel, + // or in felix.cfg or the environment on each compute node), and must match the [calico] + // openstack_region value configured in neutron.conf on each node. [Default: Empty] + OpenstackRegion string `json:"openstackRegion,omitempty"` + + // InterfacePrefix is the interface name prefix that identifies workload endpoints and so distinguishes + // them from host endpoint interfaces. Note: in environments other than bare metal, the orchestrators + // configure this appropriately. For example our Kubernetes and Docker integrations set the 'cali' value, + // and our OpenStack integration sets the 'tap' value. [Default: cali] + InterfacePrefix string `json:"interfacePrefix,omitempty"` + // InterfaceExclude is a comma-separated list of interfaces that Felix should exclude when monitoring for host + // endpoints. The default value ensures that Felix ignores Kubernetes' IPVS dummy interface, which is used + // internally by kube-proxy. If you want to exclude multiple interface names using a single value, the list + // supports regular expressions. For regular expressions you must wrap the value with '/'. For example + // having values '/^kube/,veth1' will exclude all interfaces that begin with 'kube' and also the interface + // 'veth1'. 
[Default: kube-ipvs0] + InterfaceExclude string `json:"interfaceExclude,omitempty"` + + // ChainInsertMode controls whether Felix hooks the kernel's top-level iptables chains by inserting a rule + // at the top of the chain or by appending a rule at the bottom. insert is the safe default since it prevents + // Calico's rules from being bypassed. If you switch to append mode, be sure that the other rules in the chains + // signal acceptance by falling through to the Calico rules, otherwise the Calico policy will be bypassed. + // [Default: insert] + ChainInsertMode string `json:"chainInsertMode,omitempty"` + // DefaultEndpointToHostAction controls what happens to traffic that goes from a workload endpoint to the host + // itself (after the traffic hits the endpoint egress policy). By default Calico blocks traffic from workload + // endpoints to the host itself with an iptables "DROP" action. If you want to allow some or all traffic from + // endpoint to host, set this parameter to RETURN or ACCEPT. Use RETURN if you have your own rules in the iptables + // "INPUT" chain; Calico will insert its rules at the top of that chain, then "RETURN" packets to the "INPUT" chain + // once it has completed processing workload endpoint egress policy. Use ACCEPT to unconditionally accept packets + // from workloads after processing workload endpoint egress policy. [Default: Drop] + DefaultEndpointToHostAction string `json:"defaultEndpointToHostAction,omitempty" validate:"omitempty,dropAcceptReturn"` + IptablesFilterAllowAction string `json:"iptablesFilterAllowAction,omitempty" validate:"omitempty,acceptReturn"` + IptablesMangleAllowAction string `json:"iptablesMangleAllowAction,omitempty" validate:"omitempty,acceptReturn"` + // IptablesFilterDenyAction controls what happens to traffic that is denied by network policy. By default Calico blocks traffic + // with an iptables "DROP" action. If you want to use "REJECT" action instead you can configure it in here. + IptablesFilterDenyAction string `json:"iptablesFilterDenyAction,omitempty" validate:"omitempty,dropReject"` + // LogPrefix is the log prefix that Felix uses when rendering LOG rules. [Default: calico-packet] + LogPrefix string `json:"logPrefix,omitempty"` + + // LogFilePath is the full path to the Felix log. Set to none to disable file logging. [Default: /var/log/calico/felix.log] + LogFilePath string `json:"logFilePath,omitempty"` + + // LogSeverityFile is the log severity above which logs are sent to the log file. [Default: Info] + LogSeverityFile string `json:"logSeverityFile,omitempty" validate:"omitempty,logLevel"` + // LogSeverityScreen is the log severity above which logs are sent to the stdout. [Default: Info] + LogSeverityScreen string `json:"logSeverityScreen,omitempty" validate:"omitempty,logLevel"` + // LogSeveritySys is the log severity above which logs are sent to the syslog. Set to None for no logging to syslog. + // [Default: Info] + LogSeveritySys string `json:"logSeveritySys,omitempty" validate:"omitempty,logLevel"` + // LogDebugFilenameRegex controls which source code files have their Debug log output included in the logs. + // Only logs from files with names that match the given regular expression are included. The filter only applies + // to Debug level logs. + LogDebugFilenameRegex string `json:"logDebugFilenameRegex,omitempty" validate:"omitempty,regexp"` + + // IPIPEnabled overrides whether Felix should configure an IPIP interface on the host. Optional as Felix determines this based on the existing IP pools. 
[Default: nil (unset)] + IPIPEnabled *bool `json:"ipipEnabled,omitempty" confignamev1:"IpInIpEnabled"` + // IPIPMTU is the MTU to set on the tunnel device. See Configuring MTU [Default: 1440] + IPIPMTU *int `json:"ipipMTU,omitempty" confignamev1:"IpInIpMtu"` + + // VXLANEnabled overrides whether Felix should create the VXLAN tunnel device for IPv4 VXLAN networking. Optional as Felix determines this based on the existing IP pools. [Default: nil (unset)] + VXLANEnabled *bool `json:"vxlanEnabled,omitempty" confignamev1:"VXLANEnabled"` + // VXLANMTU is the MTU to set on the IPv4 VXLAN tunnel device. See Configuring MTU [Default: 1410] + VXLANMTU *int `json:"vxlanMTU,omitempty"` + // VXLANMTUV6 is the MTU to set on the IPv6 VXLAN tunnel device. See Configuring MTU [Default: 1390] + VXLANMTUV6 *int `json:"vxlanMTUV6,omitempty"` + VXLANPort *int `json:"vxlanPort,omitempty"` + VXLANVNI *int `json:"vxlanVNI,omitempty"` + + // AllowVXLANPacketsFromWorkloads controls whether Felix will add a rule to drop VXLAN encapsulated traffic + // from workloads [Default: false] + // +optional + AllowVXLANPacketsFromWorkloads *bool `json:"allowVXLANPacketsFromWorkloads,omitempty"` + // AllowIPIPPacketsFromWorkloads controls whether Felix will add a rule to drop IPIP encapsulated traffic + // from workloads [Default: false] + // +optional + AllowIPIPPacketsFromWorkloads *bool `json:"allowIPIPPacketsFromWorkloads,omitempty"` + + // ReportingInterval is the interval at which Felix reports its status into the datastore or 0 to disable. + // Must be non-zero in OpenStack deployments. [Default: 30s] + ReportingInterval *metav1.Duration `json:"reportingInterval,omitempty" configv1timescale:"seconds" confignamev1:"ReportingIntervalSecs"` + // ReportingTTL is the time-to-live setting for process-wide status reports. [Default: 90s] + ReportingTTL *metav1.Duration `json:"reportingTTL,omitempty" configv1timescale:"seconds" confignamev1:"ReportingTTLSecs"` + + EndpointReportingEnabled *bool `json:"endpointReportingEnabled,omitempty"` + EndpointReportingDelay *metav1.Duration `json:"endpointReportingDelay,omitempty" configv1timescale:"seconds" confignamev1:"EndpointReportingDelaySecs"` + + // IptablesMarkMask is the mask that Felix selects its IPTables Mark bits from. Should be a 32 bit hexadecimal + // number with at least 8 bits set, none of which clash with any other mark bits in use on the system. + // [Default: 0xff000000] + IptablesMarkMask *uint32 `json:"iptablesMarkMask,omitempty"` + + DisableConntrackInvalidCheck *bool `json:"disableConntrackInvalidCheck,omitempty"` + + HealthEnabled *bool `json:"healthEnabled,omitempty"` + HealthHost *string `json:"healthHost,omitempty"` + HealthPort *int `json:"healthPort,omitempty"` + // HealthTimeoutOverrides allows the internal watchdog timeouts of individual subcomponents to be + // overridden. This is useful for working around "false positive" liveness timeouts that can occur + // in particularly stressful workloads or if CPU is constrained. For a list of active + // subcomponents, see Felix's logs. + HealthTimeoutOverrides []HealthTimeoutOverride `json:"healthTimeoutOverrides,omitempty" validate:"omitempty,dive"` + + // PrometheusMetricsEnabled enables the Prometheus metrics server in Felix if set to true. [Default: false] + PrometheusMetricsEnabled *bool `json:"prometheusMetricsEnabled,omitempty"` + // PrometheusMetricsHost is the host that the Prometheus metrics server should bind to. 
[Default: empty] + PrometheusMetricsHost string `json:"prometheusMetricsHost,omitempty" validate:"omitempty,prometheusHost"` + // PrometheusMetricsPort is the TCP port that the Prometheus metrics server should bind to. [Default: 9091] + PrometheusMetricsPort *int `json:"prometheusMetricsPort,omitempty"` + // PrometheusGoMetricsEnabled disables Go runtime metrics collection, which the Prometheus client does by default, when + // set to false. This reduces the number of metrics reported, reducing Prometheus load. [Default: true] + PrometheusGoMetricsEnabled *bool `json:"prometheusGoMetricsEnabled,omitempty"` + // PrometheusProcessMetricsEnabled disables process metrics collection, which the Prometheus client does by default, when + // set to false. This reduces the number of metrics reported, reducing Prometheus load. [Default: true] + PrometheusProcessMetricsEnabled *bool `json:"prometheusProcessMetricsEnabled,omitempty"` + // PrometheusWireGuardMetricsEnabled disables wireguard metrics collection, which the Prometheus client does by default, when + // set to false. This reduces the number of metrics reported, reducing Prometheus load. [Default: true] + PrometheusWireGuardMetricsEnabled *bool `json:"prometheusWireGuardMetricsEnabled,omitempty"` + + // FailsafeInboundHostPorts is a list of UDP/TCP ports and CIDRs that Felix will allow incoming traffic to host endpoints + // on irrespective of the security policy. This is useful to avoid accidentally cutting off a host with incorrect configuration. + // For back-compatibility, if the protocol is not specified, it defaults to "tcp". If a CIDR is not specified, it will allow + // traffic from all addresses. To disable all inbound host ports, use the value none. The default value allows ssh access + // and DHCP. + // [Default: tcp:22, udp:68, tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667] + FailsafeInboundHostPorts *[]ProtoPort `json:"failsafeInboundHostPorts,omitempty"` + // FailsafeOutboundHostPorts is a list of UDP/TCP ports and CIDRs that Felix will allow outgoing traffic from host endpoints + // to irrespective of the security policy. This is useful to avoid accidentally cutting off a host with incorrect configuration. + // For back-compatibility, if the protocol is not specified, it defaults to "tcp". If a CIDR is not specified, it will allow + // traffic from all addresses. To disable all outbound host ports, use the value none. The default value opens etcd's standard + // ports to ensure that Felix does not get cut off from etcd as well as allowing DHCP and DNS. + // [Default: tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667, udp:53, udp:67] + FailsafeOutboundHostPorts *[]ProtoPort `json:"failsafeOutboundHostPorts,omitempty"` + + // KubeNodePortRanges holds list of port ranges used for service node ports. Only used if felix detects kube-proxy running in ipvs mode. + // Felix uses these ranges to separate host and workload traffic. [Default: 30000:32767]. + KubeNodePortRanges *[]numorstring.Port `json:"kubeNodePortRanges,omitempty" validate:"omitempty,dive"` + + // PolicySyncPathPrefix is used to by Felix to communicate policy changes to external services, + // like Application layer policy. [Default: Empty] + PolicySyncPathPrefix string `json:"policySyncPathPrefix,omitempty"` + + // UsageReportingEnabled reports anonymous Calico version number and cluster size to projectcalico.org. Logs warnings returned by the usage + // server. 
For example, if a significant security vulnerability has been discovered in the version of Calico being used. [Default: true] + UsageReportingEnabled *bool `json:"usageReportingEnabled,omitempty"` + // UsageReportingInitialDelay controls the minimum delay before Felix makes a report. [Default: 300s] + UsageReportingInitialDelay *metav1.Duration `json:"usageReportingInitialDelay,omitempty" configv1timescale:"seconds" confignamev1:"UsageReportingInitialDelaySecs"` + // UsageReportingInterval controls the interval at which Felix makes reports. [Default: 86400s] + UsageReportingInterval *metav1.Duration `json:"usageReportingInterval,omitempty" configv1timescale:"seconds" confignamev1:"UsageReportingIntervalSecs"` + + // NATPortRange specifies the range of ports that is used for port mapping when doing outgoing NAT. When unset the default behavior of the + // network stack is used. + NATPortRange *numorstring.Port `json:"natPortRange,omitempty"` + + // NATOutgoingAddress specifies an address to use when performing source NAT for traffic in a natOutgoing pool that + // is leaving the network. By default the address used is an address on the interface the traffic is leaving on + // (ie it uses the iptables MASQUERADE target) + NATOutgoingAddress string `json:"natOutgoingAddress,omitempty"` + + // This is the IPv4 source address to use on programmed device routes. By default the source address is left blank, + // leaving the kernel to choose the source address used. + DeviceRouteSourceAddress string `json:"deviceRouteSourceAddress,omitempty"` + + // This is the IPv6 source address to use on programmed device routes. By default the source address is left blank, + // leaving the kernel to choose the source address used. + DeviceRouteSourceAddressIPv6 string `json:"deviceRouteSourceAddressIPv6,omitempty"` + + // This defines the route protocol added to programmed device routes, by default this will be RTPROT_BOOT + // when left blank. + DeviceRouteProtocol *int `json:"deviceRouteProtocol,omitempty"` + // Whether or not to remove device routes that have not been programmed by Felix. Disabling this will allow external + // applications to also add device routes. This is enabled by default which means we will remove externally added routes. + RemoveExternalRoutes *bool `json:"removeExternalRoutes,omitempty"` + + // ExternalNodesCIDRList is a list of CIDR's of external-non-calico-nodes which may source tunnel traffic and have + // the tunneled traffic be accepted at calico nodes. + ExternalNodesCIDRList *[]string `json:"externalNodesList,omitempty"` + + DebugMemoryProfilePath string `json:"debugMemoryProfilePath,omitempty"` + DebugDisableLogDropping *bool `json:"debugDisableLogDropping,omitempty"` + DebugSimulateCalcGraphHangAfter *metav1.Duration `json:"debugSimulateCalcGraphHangAfter,omitempty" configv1timescale:"seconds"` + DebugSimulateDataplaneHangAfter *metav1.Duration `json:"debugSimulateDataplaneHangAfter,omitempty" configv1timescale:"seconds"` + + IptablesNATOutgoingInterfaceFilter string `json:"iptablesNATOutgoingInterfaceFilter,omitempty" validate:"omitempty,ifaceFilter"` + + // SidecarAccelerationEnabled enables experimental sidecar acceleration [Default: false] + SidecarAccelerationEnabled *bool `json:"sidecarAccelerationEnabled,omitempty"` + + // XDPEnabled enables XDP acceleration for suitable untracked incoming deny rules. 
[Default: true] + XDPEnabled *bool `json:"xdpEnabled,omitempty" confignamev1:"XDPEnabled"` + + // GenericXDPEnabled enables Generic XDP so network cards that don't support XDP offload or driver + // modes can use XDP. This is not recommended since it doesn't provide better performance than + // iptables. [Default: false] + GenericXDPEnabled *bool `json:"genericXDPEnabled,omitempty" confignamev1:"GenericXDPEnabled"` + + // BPFEnabled, if enabled Felix will use the BPF dataplane. [Default: false] + BPFEnabled *bool `json:"bpfEnabled,omitempty" validate:"omitempty"` + // BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled sysctl to disable + // unprivileged use of BPF. This ensures that unprivileged users cannot access Calico's BPF maps and + // cannot insert their own BPF programs to interfere with Calico's. [Default: true] + BPFDisableUnprivileged *bool `json:"bpfDisableUnprivileged,omitempty" validate:"omitempty"` + // BPFLogLevel controls the log level of the BPF programs when in BPF dataplane mode. One of "Off", "Info", or + // "Debug". The logs are emitted to the BPF trace pipe, accessible with the command `tc exec bpf debug`. + // [Default: Off]. + // +optional + BPFLogLevel string `json:"bpfLogLevel" validate:"omitempty,bpfLogLevel"` + // BPFDataIfacePattern is a regular expression that controls which interfaces Felix should attach BPF programs to + // in order to catch traffic to/from the network. This needs to match the interfaces that Calico workload traffic + // flows over as well as any interfaces that handle incoming traffic to nodeports and services from outside the + // cluster. It should not match the workload interfaces (usually named cali...). + BPFDataIfacePattern string `json:"bpfDataIfacePattern,omitempty" validate:"omitempty,regexp"` + // BPFL3IfacePattern is a regular expression that allows to list tunnel devices like wireguard or vxlan (i.e., L3 devices) + // in addition to BPFDataIfacePattern. That is, tunnel interfaces not created by Calico, that Calico workload traffic flows + // over as well as any interfaces that handle incoming traffic to nodeports and services from outside the cluster. + BPFL3IfacePattern string `json:"bpfL3IfacePattern,omitempty" validate:"omitempty,regexp"` + // BPFConnectTimeLoadBalancingEnabled when in BPF mode, controls whether Felix installs the connection-time load + // balancer. The connect-time load balancer is required for the host to be able to reach Kubernetes services + // and it improves the performance of pod-to-service connections. The only reason to disable it is for debugging + // purposes. [Default: true] + BPFConnectTimeLoadBalancingEnabled *bool `json:"bpfConnectTimeLoadBalancingEnabled,omitempty" validate:"omitempty"` + // BPFExternalServiceMode in BPF mode, controls how connections from outside the cluster to services (node ports + // and cluster IPs) are forwarded to remote workloads. If set to "Tunnel" then both request and response traffic + // is tunneled to the remote node. If set to "DSR", the request traffic is tunneled but the response traffic + // is sent directly from the remote node. In "DSR" mode, the remote node appears to use the IP of the ingress + // node; this requires a permissive L2 network. [Default: Tunnel] + BPFExternalServiceMode string `json:"bpfExternalServiceMode,omitempty" validate:"omitempty,bpfServiceMode"` + // BPFDSROptoutCIDRs is a list of CIDRs which are excluded from DSR. 
That is, clients + // in those CIDRs will accesses nodeports as if BPFExternalServiceMode was set to + // Tunnel. + BPFDSROptoutCIDRs *[]string `json:"bpfDSROptoutCIDRs,omitempty" validate:"omitempty,cidrs"` + // BPFExtToServiceConnmark in BPF mode, control a 32bit mark that is set on connections from an + // external client to a local service. This mark allows us to control how packets of that + // connection are routed within the host and how is routing interpreted by RPF check. [Default: 0] + BPFExtToServiceConnmark *int `json:"bpfExtToServiceConnmark,omitempty" validate:"omitempty,gte=0,lte=4294967295"` + // BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF mode, Felix will proactively clean up the upstream + // Kubernetes kube-proxy's iptables chains. Should only be enabled if kube-proxy is not running. [Default: true] + BPFKubeProxyIptablesCleanupEnabled *bool `json:"bpfKubeProxyIptablesCleanupEnabled,omitempty" validate:"omitempty"` + // BPFKubeProxyMinSyncPeriod, in BPF mode, controls the minimum time between updates to the dataplane for Felix's + // embedded kube-proxy. Lower values give reduced set-up latency. Higher values reduce Felix CPU usage by + // batching up more work. [Default: 1s] + BPFKubeProxyMinSyncPeriod *metav1.Duration `json:"bpfKubeProxyMinSyncPeriod,omitempty" validate:"omitempty" configv1timescale:"seconds"` + // BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls whether Felix's + // embedded kube-proxy accepts EndpointSlices or not. + BPFKubeProxyEndpointSlicesEnabled *bool `json:"bpfKubeProxyEndpointSlicesEnabled,omitempty" validate:"omitempty"` + // BPFPSNATPorts sets the range from which we randomly pick a port if there is a source port + // collision. This should be within the ephemeral range as defined by RFC 6056 (1024–65535) and + // preferably outside the ephemeral ranges used by common operating systems. Linux uses + // 32768–60999, while others mostly use the IANA defined range 49152–65535. It is not necessarily + // a problem if this range overlaps with the operating systems. Both ends of the range are + // inclusive. [Default: 20000:29999] + BPFPSNATPorts *numorstring.Port `json:"bpfPSNATPorts,omitempty"` + // BPFMapSizeNATFrontend sets the size for nat front end map. + // FrontendMap should be large enough to hold an entry for each nodeport, + // external IP and each port in each service. + BPFMapSizeNATFrontend *int `json:"bpfMapSizeNATFrontend,omitempty"` + // BPFMapSizeNATBackend sets the size for nat back end map. + // This is the total number of endpoints. This is mostly + // more than the size of the number of services. + BPFMapSizeNATBackend *int `json:"bpfMapSizeNATBackend,omitempty"` + BPFMapSizeNATAffinity *int `json:"bpfMapSizeNATAffinity,omitempty"` + // BPFMapSizeRoute sets the size for the routes map. The routes map should be large enough + // to hold one entry per workload and a handful of entries per host (enough to cover its own IPs and + // tunnel IPs). + BPFMapSizeRoute *int `json:"bpfMapSizeRoute,omitempty"` + // BPFMapSizeConntrack sets the size for the conntrack map. This map must be large enough to hold + // an entry for each active connection. Warning: changing the size of the conntrack map can cause disruption. + BPFMapSizeConntrack *int `json:"bpfMapSizeConntrack,omitempty"` + // BPFMapSizeIPSets sets the size for ipsets map. The IP sets map must be large enough to hold an entry + // for each endpoint matched by every selector in the source/destination matches in network policy. 
Selectors + // such as "all()" can result in large numbers of entries (one entry per endpoint in that case). + BPFMapSizeIPSets *int `json:"bpfMapSizeIPSets,omitempty"` + // BPFMapSizeIfState sets the size for ifstate map. The ifstate map must be large enough to hold an entry + // for each device (host + workloads) on a host. + BPFMapSizeIfState *int `json:"bpfMapSizeIfState,omitempty"` + // BPFHostConntrackBypass Controls whether to bypass Linux conntrack in BPF mode for + // workloads and services. [Default: true - bypass Linux conntrack] + BPFHostConntrackBypass *bool `json:"bpfHostConntrackBypass,omitempty"` + // BPFEnforceRPF enforce strict RPF on all host interfaces with BPF programs regardless of + // what is the per-interfaces or global setting. Possible values are Disabled, Strict + // or Loose. [Default: Loose] + BPFEnforceRPF string `json:"bpfEnforceRPF,omitempty"` + // BPFPolicyDebugEnabled when true, Felix records detailed information + // about the BPF policy programs, which can be examined with the calico-bpf command-line tool. + BPFPolicyDebugEnabled *bool `json:"bpfPolicyDebugEnabled,omitempty"` + // RouteSource configures where Felix gets its routing information. + // - WorkloadIPs: use workload endpoints to construct routes. + // - CalicoIPAM: the default - use IPAM data to construct routes. + RouteSource string `json:"routeSource,omitempty" validate:"omitempty,routeSource"` + + // Calico programs additional Linux route tables for various purposes. + // RouteTableRanges specifies a set of table index ranges that Calico should use. + // Deprecates`RouteTableRange`, overrides `RouteTableRange`. + RouteTableRanges *RouteTableRanges `json:"routeTableRanges,omitempty" validate:"omitempty,dive"` + + // Deprecated in favor of RouteTableRanges. + // Calico programs additional Linux route tables for various purposes. + // RouteTableRange specifies the indices of the route tables that Calico should use. + RouteTableRange *RouteTableRange `json:"routeTableRange,omitempty" validate:"omitempty"` + + // RouteSyncDisabled will disable all operations performed on the route table. Set to true to + // run in network-policy mode only. + RouteSyncDisabled *bool `json:"routeSyncDisabled,omitempty"` + + // WireguardEnabled controls whether Wireguard is enabled for IPv4 (encapsulating IPv4 traffic over an IPv4 underlay network). [Default: false] + WireguardEnabled *bool `json:"wireguardEnabled,omitempty"` + // WireguardEnabledV6 controls whether Wireguard is enabled for IPv6 (encapsulating IPv6 traffic over an IPv6 underlay network). [Default: false] + WireguardEnabledV6 *bool `json:"wireguardEnabledV6,omitempty"` + // WireguardListeningPort controls the listening port used by IPv4 Wireguard. [Default: 51820] + WireguardListeningPort *int `json:"wireguardListeningPort,omitempty" validate:"omitempty,gt=0,lte=65535"` + // WireguardListeningPortV6 controls the listening port used by IPv6 Wireguard. [Default: 51821] + WireguardListeningPortV6 *int `json:"wireguardListeningPortV6,omitempty" validate:"omitempty,gt=0,lte=65535"` + // WireguardRoutingRulePriority controls the priority value to use for the Wireguard routing rule. [Default: 99] + WireguardRoutingRulePriority *int `json:"wireguardRoutingRulePriority,omitempty" validate:"omitempty,gt=0,lt=32766"` + // WireguardInterfaceName specifies the name to use for the IPv4 Wireguard interface. 
[Default: wireguard.cali] + WireguardInterfaceName string `json:"wireguardInterfaceName,omitempty" validate:"omitempty,interface"` + // WireguardInterfaceNameV6 specifies the name to use for the IPv6 Wireguard interface. [Default: wg-v6.cali] + WireguardInterfaceNameV6 string `json:"wireguardInterfaceNameV6,omitempty" validate:"omitempty,interface"` + // WireguardMTU controls the MTU on the IPv4 Wireguard interface. See Configuring MTU [Default: 1440] + WireguardMTU *int `json:"wireguardMTU,omitempty"` + // WireguardMTUV6 controls the MTU on the IPv6 Wireguard interface. See Configuring MTU [Default: 1420] + WireguardMTUV6 *int `json:"wireguardMTUV6,omitempty"` + // WireguardHostEncryptionEnabled controls whether Wireguard host-to-host encryption is enabled. [Default: false] + WireguardHostEncryptionEnabled *bool `json:"wireguardHostEncryptionEnabled,omitempty"` + // WireguardKeepAlive controls Wireguard PersistentKeepalive option. Set 0 to disable. [Default: 0] + WireguardPersistentKeepAlive *metav1.Duration `json:"wireguardKeepAlive,omitempty"` + + // Set source-destination-check on AWS EC2 instances. Accepted value must be one of "DoNothing", "Enable" or "Disable". + // [Default: DoNothing] + AWSSrcDstCheck *AWSSrcDstCheckOption `json:"awsSrcDstCheck,omitempty" validate:"omitempty,oneof=DoNothing Enable Disable"` + + // When service IP advertisement is enabled, prevent routing loops to service IPs that are + // not in use, by dropping or rejecting packets that do not get DNAT'd by kube-proxy. + // Unless set to "Disabled", in which case such routing loops continue to be allowed. + // [Default: Drop] + ServiceLoopPrevention string `json:"serviceLoopPrevention,omitempty" validate:"omitempty,oneof=Drop Reject Disabled"` + + // WorkloadSourceSpoofing controls whether pods can use the allowedSourcePrefixes annotation to send traffic with a source IP + // address that is not theirs. This is disabled by default. When set to "Any", pods can request any prefix. + WorkloadSourceSpoofing string `json:"workloadSourceSpoofing,omitempty" validate:"omitempty,oneof=Disabled Any"` + + // MTUIfacePattern is a regular expression that controls which interfaces Felix should scan in order + // to calculate the host's MTU. + // This should not match workload interfaces (usually named cali...). + // +optional + MTUIfacePattern string `json:"mtuIfacePattern,omitempty" validate:"omitempty,regexp"` + + // FloatingIPs configures whether or not Felix will program non-OpenStack floating IP addresses. (OpenStack-derived + // floating IPs are always programmed, regardless of this setting.) + // + // +optional + FloatingIPs *FloatingIPType `json:"floatingIPs,omitempty" validate:"omitempty"` +} + +type HealthTimeoutOverride struct { + Name string `json:"name"` + Timeout metav1.Duration `json:"timeout"` +} + +type RouteTableRange struct { + Min int `json:"min"` + Max int `json:"max"` +} + +type RouteTableIDRange struct { + Min int `json:"min"` + Max int `json:"max"` +} + +type RouteTableRanges []RouteTableIDRange + +func (r RouteTableRanges) NumDesignatedTables() int { + var len int = 0 + for _, rng := range r { + len += (rng.Max - rng.Min) + 1 // add one, since range is inclusive + } + + return len +} + +// ProtoPort is combination of protocol, port, and CIDR. Protocol and port must be specified. 
+type ProtoPort struct { + Protocol string `json:"protocol"` + Port uint16 `json:"port"` + // +optional + Net string `json:"net"` +} + +// New FelixConfiguration creates a new (zeroed) FelixConfiguration struct with the TypeMetadata +// initialized to the current version. +func NewFelixConfiguration() *FelixConfiguration { + return &FelixConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: KindFelixConfiguration, + APIVersion: GroupVersionCurrent, + }, + } +} diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/globalnetworkpolicy.go b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/globalnetworkpolicy.go new file mode 100644 index 000000000..3a8f598ed --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/globalnetworkpolicy.go @@ -0,0 +1,129 @@ +// Copyright (c) 2017,2019-2021 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + KindGlobalNetworkPolicy = "GlobalNetworkPolicy" + KindGlobalNetworkPolicyList = "GlobalNetworkPolicyList" +) + +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GlobalNetworkPolicyList is a list of Policy objects. +type GlobalNetworkPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []GlobalNetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type GlobalNetworkPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Spec GlobalNetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +type GlobalNetworkPolicySpec struct { + // Order is an optional field that specifies the order in which the policy is applied. + // Policies with higher "order" are applied after those with lower + // order. If the order is omitted, it may be considered to be "infinite" - i.e. the + // policy will be applied last. Policies with identical order will be applied in + // alphanumerical order based on the Policy "Name". + Order *float64 `json:"order,omitempty"` + // The ordered set of ingress rules. Each rule contains a set of packet match criteria and + // a corresponding action to apply. + Ingress []Rule `json:"ingress,omitempty" validate:"omitempty,dive"` + // The ordered set of egress rules. Each rule contains a set of packet match criteria and + // a corresponding action to apply. + Egress []Rule `json:"egress,omitempty" validate:"omitempty,dive"` + // The selector is an expression used to pick pick out the endpoints that the policy should + // be applied to. + // + // Selector expressions follow this syntax: + // + // label == "string_literal" -> comparison, e.g. 
my_label == "foo bar" + // label != "string_literal" -> not equal; also matches if label is not present + // label in { "a", "b", "c", ... } -> true if the value of label X is one of "a", "b", "c" + // label not in { "a", "b", "c", ... } -> true if the value of label X is not one of "a", "b", "c" + // has(label_name) -> True if that label is present + // ! expr -> negation of expr + // expr && expr -> Short-circuit and + // expr || expr -> Short-circuit or + // ( expr ) -> parens for grouping + // all() or the empty selector -> matches all endpoints. + // + // Label names are allowed to contain alphanumerics, -, _ and /. String literals are more permissive + // but they do not support escape characters. + // + // Examples (with made-up labels): + // + // type == "webserver" && deployment == "prod" + // type in {"frontend", "backend"} + // deployment != "dev" + // ! has(label_name) + Selector string `json:"selector,omitempty" validate:"selector"` + // Types indicates whether this policy applies to ingress, or to egress, or to both. When + // not explicitly specified (and so the value on creation is empty or nil), Calico defaults + // Types according to what Ingress and Egress rules are present in the policy. The + // default is: + // + // - [ PolicyTypeIngress ], if there are no Egress rules (including the case where there are + // also no Ingress rules) + // + // - [ PolicyTypeEgress ], if there are Egress rules but no Ingress rules + // + // - [ PolicyTypeIngress, PolicyTypeEgress ], if there are both Ingress and Egress rules. + // + // When the policy is read back again, Types will always be one of these values, never empty + // or nil. + Types []PolicyType `json:"types,omitempty" validate:"omitempty,dive,policyType"` + + // DoNotTrack indicates whether packets matched by the rules in this policy should go through + // the data plane's connection tracking, such as Linux conntrack. If True, the rules in + // this policy are applied before any data plane connection tracking, and packets allowed by + // this policy are marked as not to be tracked. + DoNotTrack bool `json:"doNotTrack,omitempty"` + // PreDNAT indicates to apply the rules in this policy before any DNAT. + PreDNAT bool `json:"preDNAT,omitempty"` + // ApplyOnForward indicates to apply the rules in this policy on forward traffic. + ApplyOnForward bool `json:"applyOnForward,omitempty"` + + // ServiceAccountSelector is an optional field for an expression used to select a pod based on service accounts. + ServiceAccountSelector string `json:"serviceAccountSelector,omitempty" validate:"selector"` + + // NamespaceSelector is an optional field for an expression used to select a pod based on namespaces. + NamespaceSelector string `json:"namespaceSelector,omitempty" validate:"selector"` +} + +// NewGlobalNetworkPolicy creates a new (zeroed) GlobalNetworkPolicy struct with the TypeMetadata initialised to the current +// version. +func NewGlobalNetworkPolicy() *GlobalNetworkPolicy { + return &GlobalNetworkPolicy{ + TypeMeta: metav1.TypeMeta{ + Kind: KindGlobalNetworkPolicy, + APIVersion: GroupVersionCurrent, + }, + } +} diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/globalnetworkset.go b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/globalnetworkset.go new file mode 100644 index 000000000..136d8edbc --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/globalnetworkset.go @@ -0,0 +1,63 @@ +// Copyright (c) 2018, 2021 Tigera, Inc. All rights reserved. 
+ +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + KindGlobalNetworkSet = "GlobalNetworkSet" + KindGlobalNetworkSetList = "GlobalNetworkSetList" +) + +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GlobalNetworkSetList is a list of NetworkSet objects. +type GlobalNetworkSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []GlobalNetworkSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type GlobalNetworkSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Spec GlobalNetworkSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// GlobalNetworkSetSpec contains the specification for a NetworkSet resource. +type GlobalNetworkSetSpec struct { + // The list of IP networks that belong to this set. + Nets []string `json:"nets,omitempty" validate:"omitempty,dive,cidr"` +} + +// NewGlobalNetworkSet creates a new (zeroed) NetworkSet struct with the TypeMetadata initialised to the current +// version. +func NewGlobalNetworkSet() *GlobalNetworkSet { + return &GlobalNetworkSet{ + TypeMeta: metav1.TypeMeta{ + Kind: KindGlobalNetworkSet, + APIVersion: GroupVersionCurrent, + }, + } +} diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/hostendpoint.go b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/hostendpoint.go new file mode 100644 index 000000000..5b8027e5e --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/hostendpoint.go @@ -0,0 +1,102 @@ +// Copyright (c) 2017,2020-2021 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/projectcalico/api/pkg/lib/numorstring" +) + +const ( + KindHostEndpoint = "HostEndpoint" + KindHostEndpointList = "HostEndpointList" +) + +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HostEndpointList is a list of HostEndpoint objects. 
+type HostEndpointList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []HostEndpoint `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type HostEndpoint struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Spec HostEndpointSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// HostEndpointSpec contains the specification for a HostEndpoint resource. +type HostEndpointSpec struct { + // The node name identifying the Calico node instance. + Node string `json:"node,omitempty" validate:"omitempty,name"` + // Either "*", or the name of a specific Linux interface to apply policy to; or empty. "*" + // indicates that this HostEndpoint governs all traffic to, from or through the default + // network namespace of the host named by the "Node" field; entering and leaving that + // namespace via any interface, including those from/to non-host-networked local workloads. + // + // If InterfaceName is not "*", this HostEndpoint only governs traffic that enters or leaves + // the host through the specific interface named by InterfaceName, or - when InterfaceName + // is empty - through the specific interface that has one of the IPs in ExpectedIPs. + // Therefore, when InterfaceName is empty, at least one expected IP must be specified. Only + // external interfaces (such as "eth0") are supported here; it isn't possible for a + // HostEndpoint to protect traffic through a specific local workload interface. + // + // Note: Only some kinds of policy are implemented for "*" HostEndpoints; initially just + // pre-DNAT policy. Please check Calico documentation for the latest position. + InterfaceName string `json:"interfaceName,omitempty" validate:"omitempty,interface"` + // The expected IP addresses (IPv4 and IPv6) of the endpoint. + // If "InterfaceName" is not present, Calico will look for an interface matching any + // of the IPs in the list and apply policy to that. + // Note: + // When using the selector match criteria in an ingress or egress security Policy + // or Profile, Calico converts the selector into a set of IP addresses. For host + // endpoints, the ExpectedIPs field is used for that purpose. (If only the interface + // name is specified, Calico does not learn the IPs of the interface for use in match + // criteria.) + ExpectedIPs []string `json:"expectedIPs,omitempty" validate:"omitempty,dive,ip"` + // A list of identifiers of security Profile objects that apply to this endpoint. Each + // profile is applied in the order that they appear in this list. Profile rules are applied + // after the selector-based security policy. + Profiles []string `json:"profiles,omitempty" validate:"omitempty,dive,name"` + // Ports contains the endpoint's named ports, which may be referenced in security policy rules. + Ports []EndpointPort `json:"ports,omitempty" validate:"dive"` +} + +type EndpointPort struct { + Name string `json:"name" validate:"portName"` + Protocol numorstring.Protocol `json:"protocol"` + Port uint16 `json:"port" validate:"gt=0"` +} + +// NewHostEndpoint creates a new (zeroed) HostEndpoint struct with the TypeMetadata initialised to the current +// version. 
+func NewHostEndpoint() *HostEndpoint { + return &HostEndpoint{ + TypeMeta: metav1.TypeMeta{ + Kind: KindHostEndpoint, + APIVersion: GroupVersionCurrent, + }, + } +} diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/ipamconfig.go b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/ipamconfig.go new file mode 100644 index 000000000..d6e16847d --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/ipamconfig.go @@ -0,0 +1,68 @@ +// Copyright (c) 2022 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + KindIPAMConfiguration = "IPAMConfiguration" + KindIPAMConfigurationList = "IPAMConfigurationList" +) + +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IPAMConfigurationList contains a list of IPAMConfiguration resources. +type IPAMConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []IPAMConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IPAMConfiguration contains information about a block for IP address assignment. +type IPAMConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Spec IPAMConfigurationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// IPAMConfigurationSpec contains the specification for an IPPool resource. +type IPAMConfigurationSpec struct { + // When StrictAffinity is true, borrowing IP addresses is not allowed. + StrictAffinity bool `json:"strictAffinity" validate:"required"` + + // MaxBlocksPerHost, if non-zero, is the max number of blocks that can be + // affine to each host. + MaxBlocksPerHost int32 `json:"maxBlocksPerHost,omitempty"` +} + +// NewIPAMConfiguration creates a new (zeroed) IPAMConfiguration struct with the TypeMetadata initialised to the current +// version. +func NewIPAMConfiguration() *IPAMConfiguration { + return &IPAMConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: KindIPPool, + APIVersion: GroupVersionCurrent, + }, + } +} diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/ippool.go b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/ippool.go new file mode 100644 index 000000000..76cc2557b --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/ippool.go @@ -0,0 +1,147 @@ +// Copyright (c) 2017, 2021 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + KindIPPool = "IPPool" + KindIPPoolList = "IPPoolList" +) + +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IPPoolList contains a list of IPPool resources. +type IPPoolList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []IPPool `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type IPPool struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Spec IPPoolSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// IPPoolSpec contains the specification for an IPPool resource. +type IPPoolSpec struct { + // The pool CIDR. + CIDR string `json:"cidr" validate:"net"` + + // Contains configuration for VXLAN tunneling for this pool. If not specified, + // then this is defaulted to "Never" (i.e. VXLAN tunneling is disabled). + VXLANMode VXLANMode `json:"vxlanMode,omitempty" validate:"omitempty,vxlanMode"` + + // Contains configuration for IPIP tunneling for this pool. If not specified, + // then this is defaulted to "Never" (i.e. IPIP tunneling is disabled). + IPIPMode IPIPMode `json:"ipipMode,omitempty" validate:"omitempty,ipIpMode"` + + // When natOutgoing is true, packets sent from Calico networked containers in + // this pool to destinations outside of this pool will be masqueraded. + NATOutgoing bool `json:"natOutgoing,omitempty"` + + // When disabled is true, Calico IPAM will not assign addresses from this pool. + Disabled bool `json:"disabled,omitempty"` + + // Disable exporting routes from this IP Pool's CIDR over BGP. [Default: false] + DisableBGPExport bool `json:"disableBGPExport,omitempty" validate:"omitempty"` + + // The block size to use for IP address assignments from this pool. Defaults to 26 for IPv4 and 122 for IPv6. + BlockSize int `json:"blockSize,omitempty"` + + // Allows IPPool to allocate for a specific node by label selector. + NodeSelector string `json:"nodeSelector,omitempty" validate:"omitempty,selector"` + + // Deprecated: this field is only used for APIv1 backwards compatibility. + // Setting this field is not allowed, this field is for internal use only. + IPIP *IPIPConfiguration `json:"ipip,omitempty" validate:"omitempty,mustBeNil"` + + // Deprecated: this field is only used for APIv1 backwards compatibility. + // Setting this field is not allowed, this field is for internal use only. + NATOutgoingV1 bool `json:"nat-outgoing,omitempty" validate:"omitempty,mustBeFalse"` + + // AllowedUse controls what the IP pool will be used for. 
If not specified or empty, defaults to + // ["Tunnel", "Workload"] for back-compatibility + AllowedUses []IPPoolAllowedUse `json:"allowedUses,omitempty" validate:"omitempty"` +} + +type IPPoolAllowedUse string + +const ( + IPPoolAllowedUseWorkload IPPoolAllowedUse = "Workload" + IPPoolAllowedUseTunnel = "Tunnel" +) + +type VXLANMode string + +const ( + VXLANModeNever VXLANMode = "Never" + VXLANModeAlways = "Always" + VXLANModeCrossSubnet = "CrossSubnet" +) + +type IPIPMode string + +const ( + IPIPModeNever IPIPMode = "Never" + IPIPModeAlways = "Always" + IPIPModeCrossSubnet = "CrossSubnet" +) + +// The following definitions are only used for APIv1 backwards compatibility. +// They are for internal use only. +type EncapMode string + +const ( + Undefined EncapMode = "" + Always = "always" + CrossSubnet = "cross-subnet" +) + +const DefaultMode = Always + +type IPIPConfiguration struct { + // When enabled is true, ipip tunneling will be used to deliver packets to + // destinations within this pool. + Enabled bool `json:"enabled,omitempty"` + + // The IPIP mode. This can be one of "always" or "cross-subnet". A mode + // of "always" will also use IPIP tunneling for routing to destination IP + // addresses within this pool. A mode of "cross-subnet" will only use IPIP + // tunneling when the destination node is on a different subnet to the + // originating node. The default value (if not specified) is "always". + Mode EncapMode `json:"mode,omitempty" validate:"ipIpMode"` +} + +// NewIPPool creates a new (zeroed) IPPool struct with the TypeMetadata initialised to the current +// version. +func NewIPPool() *IPPool { + return &IPPool{ + TypeMeta: metav1.TypeMeta{ + Kind: KindIPPool, + APIVersion: GroupVersionCurrent, + }, + } +} diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/ipreservation.go b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/ipreservation.go new file mode 100644 index 000000000..f8b43e96c --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/ipreservation.go @@ -0,0 +1,68 @@ +// Copyright (c) 2017, 2021 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + KindIPReservation = "IPReservation" + KindIPReservationList = "IPReservationList" +) + +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IPReservationList contains a list of IPReservation resources. +type IPReservationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []IPReservation `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IPReservation allows certain IP addresses to be reserved (i.e. prevented from being allocated) by Calico +// IPAM. 
Reservations only block new allocations, they do not cause existing IP allocations to be released. +// The current implementation is only suitable for reserving small numbers of IP addresses relative to the +// size of the IP pool. If large portions of an IP pool are reserved, Calico IPAM may hunt for a long time +// to find a non-reserved IP. +type IPReservation struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Spec IPReservationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// IPReservationSpec contains the specification for an IPReservation resource. +type IPReservationSpec struct { + // ReservedCIDRs is a list of CIDRs and/or IP addresses that Calico IPAM will exclude from new allocations. + ReservedCIDRs []string `json:"reservedCIDRs,omitempty" validate:"cidrs,omitempty"` +} + +// NewIPReservation creates a new (zeroed) IPReservation struct with the TypeMetadata initialised to the current +// version. +func NewIPReservation() *IPReservation { + return &IPReservation{ + TypeMeta: metav1.TypeMeta{ + Kind: KindIPReservation, + APIVersion: GroupVersionCurrent, + }, + } +} diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/kubecontrollersconfig.go b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/kubecontrollersconfig.go new file mode 100644 index 000000000..d2805b7ca --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/kubecontrollersconfig.go @@ -0,0 +1,162 @@ +// Copyright (c) 2020-2021 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + KindKubeControllersConfiguration = "KubeControllersConfiguration" + KindKubeControllersConfigurationList = "KubeControllersConfigurationList" +) + +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeControllersConfigurationList contains a list of KubeControllersConfiguration object. +type KubeControllersConfigurationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []KubeControllersConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type KubeControllersConfiguration struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Spec KubeControllersConfigurationSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + Status KubeControllersConfigurationStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// KubeControllersConfigurationSpec contains the values of the Kubernetes controllers configuration. 
+type KubeControllersConfigurationSpec struct { + // LogSeverityScreen is the log severity above which logs are sent to the stdout. [Default: Info] + LogSeverityScreen string `json:"logSeverityScreen,omitempty" validate:"omitempty,logLevel"` + + // HealthChecks enables or disables support for health checks [Default: Enabled] + HealthChecks string `json:"healthChecks,omitempty" validate:"omitempty,oneof=Enabled Disabled"` + + // EtcdV3CompactionPeriod is the period between etcdv3 compaction requests. Set to 0 to disable. [Default: 10m] + EtcdV3CompactionPeriod *metav1.Duration `json:"etcdV3CompactionPeriod,omitempty" validate:"omitempty"` + + // PrometheusMetricsPort is the TCP port that the Prometheus metrics server should bind to. Set to 0 to disable. [Default: 9094] + PrometheusMetricsPort *int `json:"prometheusMetricsPort,omitempty"` + + // Controllers enables and configures individual Kubernetes controllers + Controllers ControllersConfig `json:"controllers"` + + // DebugProfilePort configures the port to serve memory and cpu profiles on. If not specified, profiling + // is disabled. + DebugProfilePort *int32 `json:"debugProfilePort,omitempty"` +} + +// ControllersConfig enables and configures individual Kubernetes controllers +type ControllersConfig struct { + // Node enables and configures the node controller. Enabled by default, set to nil to disable. + Node *NodeControllerConfig `json:"node,omitempty"` + + // Policy enables and configures the policy controller. Enabled by default, set to nil to disable. + Policy *PolicyControllerConfig `json:"policy,omitempty"` + + // WorkloadEndpoint enables and configures the workload endpoint controller. Enabled by default, set to nil to disable. + WorkloadEndpoint *WorkloadEndpointControllerConfig `json:"workloadEndpoint,omitempty"` + + // ServiceAccount enables and configures the service account controller. Enabled by default, set to nil to disable. + ServiceAccount *ServiceAccountControllerConfig `json:"serviceAccount,omitempty"` + + // Namespace enables and configures the namespace controller. Enabled by default, set to nil to disable. + Namespace *NamespaceControllerConfig `json:"namespace,omitempty"` +} + +// NodeControllerConfig configures the node controller, which automatically cleans up configuration +// for nodes that no longer exist. Optionally, it can create host endpoints for all Kubernetes nodes. +type NodeControllerConfig struct { + // ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m] + ReconcilerPeriod *metav1.Duration `json:"reconcilerPeriod,omitempty" validate:"omitempty"` + + // SyncLabels controls whether to copy Kubernetes node labels to Calico nodes. [Default: Enabled] + SyncLabels string `json:"syncLabels,omitempty" validate:"omitempty,oneof=Enabled Disabled"` + + // HostEndpoint controls syncing nodes to host endpoints. Disabled by default, set to nil to disable. + HostEndpoint *AutoHostEndpointConfig `json:"hostEndpoint,omitempty"` + + // LeakGracePeriod is the period used by the controller to determine if an IP address has been leaked. + // Set to 0 to disable IP garbage collection. [Default: 15m] + // +optional + LeakGracePeriod *metav1.Duration `json:"leakGracePeriod,omitempty"` +} + +type AutoHostEndpointConfig struct { + // AutoCreate enables automatic creation of host endpoints for every node. 
[Default: Disabled] + AutoCreate string `json:"autoCreate,omitempty" validate:"omitempty,oneof=Enabled Disabled"` +} + +// PolicyControllerConfig configures the network policy controller, which syncs Kubernetes policies +// to Calico policies (only used for etcdv3 datastore). +type PolicyControllerConfig struct { + // ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m] + ReconcilerPeriod *metav1.Duration `json:"reconcilerPeriod,omitempty" validate:"omitempty"` +} + +// WorkloadEndpointControllerConfig configures the workload endpoint controller, which syncs Kubernetes +// labels to Calico workload endpoints (only used for etcdv3 datastore). +type WorkloadEndpointControllerConfig struct { + // ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m] + ReconcilerPeriod *metav1.Duration `json:"reconcilerPeriod,omitempty" validate:"omitempty"` +} + +// ServiceAccountControllerConfig configures the service account controller, which syncs Kubernetes +// service accounts to Calico profiles (only used for etcdv3 datastore). +type ServiceAccountControllerConfig struct { + // ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m] + ReconcilerPeriod *metav1.Duration `json:"reconcilerPeriod,omitempty" validate:"omitempty"` +} + +// NamespaceControllerConfig configures the service account controller, which syncs Kubernetes +// service accounts to Calico profiles (only used for etcdv3 datastore). +type NamespaceControllerConfig struct { + // ReconcilerPeriod is the period to perform reconciliation with the Calico datastore. [Default: 5m] + ReconcilerPeriod *metav1.Duration `json:"reconcilerPeriod,omitempty" validate:"omitempty"` +} + +// KubeControllersConfigurationStatus represents the status of the configuration. It's useful for admins to +// be able to see the actual config that was applied, which can be modified by environment variables on the +// kube-controllers process. +type KubeControllersConfigurationStatus struct { + // RunningConfig contains the effective config that is running in the kube-controllers pod, after + // merging the API resource with any environment variables. + RunningConfig KubeControllersConfigurationSpec `json:"runningConfig,omitempty"` + + // EnvironmentVars contains the environment variables on the kube-controllers that influenced + // the RunningConfig. + EnvironmentVars map[string]string `json:"environmentVars,omitempty"` +} + +// New KubeControllersConfiguration creates a new (zeroed) KubeControllersConfiguration struct with +// the TypeMetadata initialized to the current version. +func NewKubeControllersConfiguration() *KubeControllersConfiguration { + return &KubeControllersConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: KindKubeControllersConfiguration, + APIVersion: GroupVersionCurrent, + }, + } +} diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/networkpolicy.go b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/networkpolicy.go new file mode 100644 index 000000000..5b69959ec --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/networkpolicy.go @@ -0,0 +1,114 @@ +// Copyright (c) 2017,2019,2021 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + KindNetworkPolicy = "NetworkPolicy" + KindNetworkPolicyList = "NetworkPolicyList" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NetworkPolicyList is a list of Policy objects. +type NetworkPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type NetworkPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +type NetworkPolicySpec struct { + // Order is an optional field that specifies the order in which the policy is applied. + // Policies with higher "order" are applied after those with lower + // order. If the order is omitted, it may be considered to be "infinite" - i.e. the + // policy will be applied last. Policies with identical order will be applied in + // alphanumerical order based on the Policy "Name". + Order *float64 `json:"order,omitempty"` + // The ordered set of ingress rules. Each rule contains a set of packet match criteria and + // a corresponding action to apply. + Ingress []Rule `json:"ingress,omitempty" validate:"omitempty,dive"` + // The ordered set of egress rules. Each rule contains a set of packet match criteria and + // a corresponding action to apply. + Egress []Rule `json:"egress,omitempty" validate:"omitempty,dive"` + // The selector is an expression used to pick pick out the endpoints that the policy should + // be applied to. + // + // Selector expressions follow this syntax: + // + // label == "string_literal" -> comparison, e.g. my_label == "foo bar" + // label != "string_literal" -> not equal; also matches if label is not present + // label in { "a", "b", "c", ... } -> true if the value of label X is one of "a", "b", "c" + // label not in { "a", "b", "c", ... } -> true if the value of label X is not one of "a", "b", "c" + // has(label_name) -> True if that label is present + // ! expr -> negation of expr + // expr && expr -> Short-circuit and + // expr || expr -> Short-circuit or + // ( expr ) -> parens for grouping + // all() or the empty selector -> matches all endpoints. + // + // Label names are allowed to contain alphanumerics, -, _ and /. String literals are more permissive + // but they do not support escape characters. + // + // Examples (with made-up labels): + // + // type == "webserver" && deployment == "prod" + // type in {"frontend", "backend"} + // deployment != "dev" + // ! has(label_name) + Selector string `json:"selector,omitempty" validate:"selector"` + // Types indicates whether this policy applies to ingress, or to egress, or to both. 
When + // not explicitly specified (and so the value on creation is empty or nil), Calico defaults + // Types according to what Ingress and Egress are present in the policy. The + // default is: + // + // - [ PolicyTypeIngress ], if there are no Egress rules (including the case where there are + // also no Ingress rules) + // + // - [ PolicyTypeEgress ], if there are Egress rules but no Ingress rules + // + // - [ PolicyTypeIngress, PolicyTypeEgress ], if there are both Ingress and Egress rules. + // + // When the policy is read back again, Types will always be one of these values, never empty + // or nil. + Types []PolicyType `json:"types,omitempty" validate:"omitempty,dive,policyType"` + + // ServiceAccountSelector is an optional field for an expression used to select a pod based on service accounts. + ServiceAccountSelector string `json:"serviceAccountSelector,omitempty" validate:"selector"` +} + +// NewNetworkPolicy creates a new (zeroed) NetworkPolicy struct with the TypeMetadata initialised to the current +// version. +func NewNetworkPolicy() *NetworkPolicy { + return &NetworkPolicy{ + TypeMeta: metav1.TypeMeta{ + Kind: KindNetworkPolicy, + APIVersion: GroupVersionCurrent, + }, + } +} diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/networkset.go b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/networkset.go new file mode 100644 index 000000000..2d1630f33 --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/networkset.go @@ -0,0 +1,60 @@ +// Copyright (c) 2019,2021 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + KindNetworkSet = "NetworkSet" + KindNetworkSetList = "NetworkSetList" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NetworkSetList is a list of NetworkSet objects. +type NetworkSetList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []NetworkSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type NetworkSet struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Spec NetworkSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// NetworkSetSpec contains the specification for a NetworkSet resource. +type NetworkSetSpec struct { + // The list of IP networks that belong to this set. + Nets []string `json:"nets,omitempty" validate:"omitempty,dive,cidr"` +} + +// NewNetworkSet creates a new (zeroed) NetworkSet struct with the TypeMetadata initialised to the current version. 
+func NewNetworkSet() *NetworkSet { + return &NetworkSet{ + TypeMeta: metav1.TypeMeta{ + Kind: KindNetworkSet, + APIVersion: GroupVersionCurrent, + }, + } +} diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/nodestatus.go b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/nodestatus.go new file mode 100644 index 000000000..c3382024e --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/nodestatus.go @@ -0,0 +1,242 @@ +// Copyright (c) 2021 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + KindCalicoNodeStatus = "CalicoNodeStatus" + KindCalicoNodeStatusList = "CalicoNodeStatusList" +) + +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CalicoNodeStatusList is a list of CalicoNodeStatus resources. +type CalicoNodeStatusList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []CalicoNodeStatus `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type CalicoNodeStatus struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Spec CalicoNodeStatusSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + Status CalicoNodeStatusStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// CalicoNodeStatusSpec contains the specification for a CalicoNodeStatus resource. +type CalicoNodeStatusSpec struct { + // The node name identifies the Calico node instance for node status. + Node string `json:"node,omitempty" validate:"required,name"` + + // Classes declares the types of information to monitor for this calico/node, + // and allows for selective status reporting about certain subsets of information. + Classes []NodeStatusClassType `json:"classes,omitempty" validate:"required,unique"` + + // UpdatePeriodSeconds is the period at which CalicoNodeStatus should be updated. + // Set to 0 to disable CalicoNodeStatus refresh. Maximum update period is one day. + UpdatePeriodSeconds *uint32 `json:"updatePeriodSeconds,omitempty" validate:"required,gte=0,lte=86400"` +} + +// CalicoNodeStatusStatus defines the observed state of CalicoNodeStatus. +// No validation needed for status since it is updated by Calico. +type CalicoNodeStatusStatus struct { + // LastUpdated is a timestamp representing the server time when CalicoNodeStatus object + // last updated. It is represented in RFC3339 form and is in UTC. + // +nullable + LastUpdated metav1.Time `json:"lastUpdated,omitempty"` + + // Agent holds agent status on the node. + Agent CalicoNodeAgentStatus `json:"agent,omitempty"` + + // BGP holds node BGP status. 
+ BGP CalicoNodeBGPStatus `json:"bgp,omitempty"` + + // Routes reports routes known to the Calico BGP daemon on the node. + Routes CalicoNodeBGPRouteStatus `json:"routes,omitempty"` +} + +// CalicoNodeAgentStatus defines the observed state of agent status on the node. +type CalicoNodeAgentStatus struct { + // BIRDV4 represents the latest observed status of bird4. + BIRDV4 BGPDaemonStatus `json:"birdV4,omitempty"` + + // BIRDV6 represents the latest observed status of bird6. + BIRDV6 BGPDaemonStatus `json:"birdV6,omitempty"` +} + +// CalicoNodeBGPStatus defines the observed state of BGP status on the node. +type CalicoNodeBGPStatus struct { + // The total number of IPv4 established bgp sessions. + NumberEstablishedV4 int `json:"numberEstablishedV4"` + + // The total number of IPv4 non-established bgp sessions. + NumberNotEstablishedV4 int `json:"numberNotEstablishedV4"` + + // The total number of IPv6 established bgp sessions. + NumberEstablishedV6 int `json:"numberEstablishedV6"` + + // The total number of IPv6 non-established bgp sessions. + NumberNotEstablishedV6 int `json:"numberNotEstablishedV6"` + + // PeersV4 represents IPv4 BGP peers status on the node. + PeersV4 []CalicoNodePeer `json:"peersV4,omitempty"` + + // PeersV6 represents IPv6 BGP peers status on the node. + PeersV6 []CalicoNodePeer `json:"peersV6,omitempty"` +} + +// CalicoNodeBGPRouteStatus defines the observed state of routes status on the node. +type CalicoNodeBGPRouteStatus struct { + // RoutesV4 represents IPv4 routes on the node. + RoutesV4 []CalicoNodeRoute `json:"routesV4,omitempty"` + + // RoutesV6 represents IPv6 routes on the node. + RoutesV6 []CalicoNodeRoute `json:"routesV6,omitempty"` +} + +// BGPDaemonStatus defines the observed state of BGP daemon. +type BGPDaemonStatus struct { + // The state of the BGP Daemon. + State BGPDaemonState `json:"state,omitempty"` + + // Version of the BGP daemon + Version string `json:"version,omitempty"` + + // Router ID used by bird. + RouterID string `json:"routerID,omitempty"` + + // LastBootTime holds the value of lastBootTime from bird.ctl output. + LastBootTime string `json:"lastBootTime,omitempty"` + + // LastReconfigurationTime holds the value of lastReconfigTime from bird.ctl output. + LastReconfigurationTime string `json:"lastReconfigurationTime,omitempty"` +} + +// CalicoNodePeer contains the status of BGP peers on the node. +type CalicoNodePeer struct { + // IP address of the peer whose condition we are reporting. + PeerIP string `json:"peerIP,omitempty" validate:"omitempty,ip"` + + // Type indicates whether this peer is configured via the node-to-node mesh, + // or via en explicit global or per-node BGPPeer object. + Type BGPPeerType `json:"type,omitempty"` + + // State is the BGP session state. + State BGPSessionState `json:"state,omitempty"` + + // Since the state or reason last changed. + Since string `json:"since,omitempty"` +} + +// CalicoNodeRoute contains the status of BGP routes on the node. +type CalicoNodeRoute struct { + // Type indicates if the route is being used for forwarding or not. + Type CalicoNodeRouteType `json:"type,omitempty"` + + // Destination of the route. + Destination string `json:"destination,omitempty"` + + // Gateway for the destination. + Gateway string `json:"gateway,omitempty"` + + // Interface for the destination + Interface string `json:"interface,omitempty"` + + // LearnedFrom contains information regarding where this route originated. 
+ LearnedFrom CalicoNodeRouteLearnedFrom `json:"learnedFrom,omitempty"` +} + +// CalicoNodeRouteLearnedFrom contains the information of the source from which a routes has been learned. +type CalicoNodeRouteLearnedFrom struct { + // Type of the source where a route is learned from. + SourceType CalicoNodeRouteSourceType `json:"sourceType,omitempty"` + + // If sourceType is NodeMesh or BGPPeer, IP address of the router that sent us this route. + PeerIP string `json:"peerIP,omitempty, validate:"omitempty,ip"` +} + +// NewCalicoNodeStatus creates a new (zeroed) CalicoNodeStatus struct with the TypeMetadata initialised to the current +// version. +func NewCalicoNodeStatus() *CalicoNodeStatus { + return &CalicoNodeStatus{ + TypeMeta: metav1.TypeMeta{ + Kind: KindCalicoNodeStatus, + APIVersion: GroupVersionCurrent, + }, + } +} + +type CalicoNodeRouteType string + +const ( + RouteTypeFIB CalicoNodeRouteType = "FIB" + RouteTypeRIB = "RIB" +) + +type CalicoNodeRouteSourceType string + +const ( + RouteSourceTypeKernel CalicoNodeRouteSourceType = "Kernel" + RouteSourceTypeStatic = "Static" + RouteSourceTypeDirect = "Direct" + RouteSourceTypeNodeMesh = "NodeMesh" + RouteSourceTypeBGPPeer = "BGPPeer" +) + +type NodeStatusClassType string + +const ( + NodeStatusClassTypeAgent NodeStatusClassType = "Agent" + NodeStatusClassTypeBGP = "BGP" + NodeStatusClassTypeRoutes = "Routes" +) + +type BGPPeerType string + +const ( + BGPPeerTypeNodeMesh BGPPeerType = "NodeMesh" + BGPPeerTypeNodePeer = "NodePeer" + BGPPeerTypeGlobalPeer = "GlobalPeer" +) + +type BGPDaemonState string + +const ( + BGPDaemonStateReady BGPDaemonState = "Ready" + BGPDaemonStateNotReady = "NotReady" +) + +type BGPSessionState string + +const ( + BGPSessionStateIdle BGPSessionState = "Idle" + BGPSessionStateConnect = "Connect" + BGPSessionStateActive = "Active" + BGPSessionStateOpenSent = "OpenSent" + BGPSessionStateOpenConfirm = "OpenConfirm" + BGPSessionStateEstablished = "Established" + BGPSessionStateClose = "Close" +) diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/policy_common.go b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/policy_common.go new file mode 100644 index 000000000..74830a60b --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/policy_common.go @@ -0,0 +1,214 @@ +// Copyright (c) 2017-2018,2020-2021 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3 + +import ( + "github.com/projectcalico/api/pkg/lib/numorstring" +) + +// PolicyType enumerates the possible values of the PolicySpec Types field. +type PolicyType string + +const ( + PolicyTypeIngress PolicyType = "Ingress" + PolicyTypeEgress PolicyType = "Egress" +) + +// A Rule encapsulates a set of match criteria and an action. Both selector-based security Policy +// and security Profiles reference rules - separated out as a list of rules for both +// ingress and egress packet matching. 
+// +// Each positive match criteria has a negated version, prefixed with "Not". All the match +// criteria within a rule must be satisfied for a packet to match. A single rule can contain +// the positive and negative version of a match and both must be satisfied for the rule to match. +type Rule struct { + Action Action `json:"action" validate:"action"` + // IPVersion is an optional field that restricts the rule to only match a specific IP + // version. + IPVersion *int `json:"ipVersion,omitempty" validate:"omitempty,ipVersion"` + // Protocol is an optional field that restricts the rule to only apply to traffic of + // a specific IP protocol. Required if any of the EntityRules contain Ports + // (because ports only apply to certain protocols). + // + // Must be one of these string values: "TCP", "UDP", "ICMP", "ICMPv6", "SCTP", "UDPLite" + // or an integer in the range 1-255. + Protocol *numorstring.Protocol `json:"protocol,omitempty" validate:"omitempty"` + // ICMP is an optional field that restricts the rule to apply to a specific type and + // code of ICMP traffic. This should only be specified if the Protocol field is set to + // "ICMP" or "ICMPv6". + ICMP *ICMPFields `json:"icmp,omitempty" validate:"omitempty"` + // NotProtocol is the negated version of the Protocol field. + NotProtocol *numorstring.Protocol `json:"notProtocol,omitempty" validate:"omitempty"` + // NotICMP is the negated version of the ICMP field. + NotICMP *ICMPFields `json:"notICMP,omitempty" validate:"omitempty"` + // Source contains the match criteria that apply to source entity. + Source EntityRule `json:"source,omitempty" validate:"omitempty"` + // Destination contains the match criteria that apply to destination entity. + Destination EntityRule `json:"destination,omitempty" validate:"omitempty"` + + // HTTP contains match criteria that apply to HTTP requests. + HTTP *HTTPMatch `json:"http,omitempty" validate:"omitempty"` + + // Metadata contains additional information for this rule + Metadata *RuleMetadata `json:"metadata,omitempty" validate:"omitempty"` +} + +// HTTPPath specifies an HTTP path to match. It may be either of the form: +// exact: : which matches the path exactly or +// prefix: : which matches the path prefix +type HTTPPath struct { + Exact string `json:"exact,omitempty" validate:"omitempty"` + Prefix string `json:"prefix,omitempty" validate:"omitempty"` +} + +// HTTPMatch is an optional field that apply only to HTTP requests +// The Methods and Path fields are joined with AND +type HTTPMatch struct { + // Methods is an optional field that restricts the rule to apply only to HTTP requests that use one of the listed + // HTTP Methods (e.g. GET, PUT, etc.) + // Multiple methods are OR'd together. + Methods []string `json:"methods,omitempty" validate:"omitempty"` + // Paths is an optional field that restricts the rule to apply to HTTP requests that use one of the listed + // HTTP Paths. + // Multiple paths are OR'd together. + // e.g: + // - exact: /foo + // - prefix: /bar + // NOTE: Each entry may ONLY specify either a `exact` or a `prefix` match. The validator will check for it. + Paths []HTTPPath `json:"paths,omitempty" validate:"omitempty"` +} + +// ICMPFields defines structure for ICMP and NotICMP sub-struct for ICMP code and type +type ICMPFields struct { + // Match on a specific ICMP type. For example a value of 8 refers to ICMP Echo Request + // (i.e. pings). + Type *int `json:"type,omitempty" validate:"omitempty,gte=0,lte=254"` + // Match on a specific ICMP code. 
If specified, the Type value must also be specified. + // This is a technical limitation imposed by the kernel's iptables firewall, which + // Calico uses to enforce the rule. + Code *int `json:"code,omitempty" validate:"omitempty,gte=0,lte=255"` +} + +// An EntityRule is a sub-component of a Rule comprising the match criteria specific +// to a particular entity (that is either the source or destination). +// +// A source EntityRule matches the source endpoint and originating traffic. +// A destination EntityRule matches the destination endpoint and terminating traffic. +type EntityRule struct { + // Nets is an optional field that restricts the rule to only apply to traffic that + // originates from (or terminates at) IP addresses in any of the given subnets. + Nets []string `json:"nets,omitempty" validate:"omitempty,dive,net"` + + // Selector is an optional field that contains a selector expression (see Policy for + // sample syntax). Only traffic that originates from (terminates at) endpoints matching + // the selector will be matched. + // + // Note that: in addition to the negated version of the Selector (see NotSelector below), the + // selector expression syntax itself supports negation. The two types of negation are subtly + // different. One negates the set of matched endpoints, the other negates the whole match: + // + // Selector = "!has(my_label)" matches packets that are from other Calico-controlled + // endpoints that do not have the label "my_label". + // + // NotSelector = "has(my_label)" matches packets that are not from Calico-controlled + // endpoints that do have the label "my_label". + // + // The effect is that the latter will accept packets from non-Calico sources whereas the + // former is limited to packets from Calico-controlled endpoints. + Selector string `json:"selector,omitempty" validate:"omitempty,selector"` + + // NamespaceSelector is an optional field that contains a selector expression. Only traffic + // that originates from (or terminates at) endpoints within the selected namespaces will be + // matched. When both NamespaceSelector and another selector are defined on the same rule, then only + // workload endpoints that are matched by both selectors will be selected by the rule. + // + // For NetworkPolicy, an empty NamespaceSelector implies that the Selector is limited to selecting + // only workload endpoints in the same namespace as the NetworkPolicy. + // + // For NetworkPolicy, `global()` NamespaceSelector implies that the Selector is limited to selecting + // only GlobalNetworkSet or HostEndpoint. + // + // For GlobalNetworkPolicy, an empty NamespaceSelector implies the Selector applies to workload + // endpoints across all namespaces. + NamespaceSelector string `json:"namespaceSelector,omitempty" validate:"omitempty,selector"` + + // Services is an optional field that contains options for matching Kubernetes Services. + // If specified, only traffic that originates from or terminates at endpoints within the selected + // service(s) will be matched, and only to/from each endpoint's port. + // + // Services cannot be specified on the same rule as Selector, NotSelector, NamespaceSelector, Nets, + // NotNets or ServiceAccounts. + // + // Ports and NotPorts can only be specified with Services on ingress rules. + Services *ServiceMatch `json:"services,omitempty" validate:"omitempty"` + + // Ports is an optional field that restricts the rule to only apply to traffic that has a + // source (destination) port that matches one of these ranges/values. 
This value is a
+	// list of integers or strings that represent ranges of ports.
+	//
+	// Since only some protocols have ports, if any ports are specified it requires the
+	// Protocol match in the Rule to be set to "TCP" or "UDP".
+	Ports []numorstring.Port `json:"ports,omitempty" validate:"omitempty,dive"`
+
+	// NotNets is the negated version of the Nets field.
+	NotNets []string `json:"notNets,omitempty" validate:"omitempty,dive,net"`
+
+	// NotSelector is the negated version of the Selector field. See Selector field for
+	// subtleties with negated selectors.
+	NotSelector string `json:"notSelector,omitempty" validate:"omitempty,selector"`
+
+	// NotPorts is the negated version of the Ports field.
+	// Since only some protocols have ports, if any ports are specified it requires the
+	// Protocol match in the Rule to be set to "TCP" or "UDP".
+	NotPorts []numorstring.Port `json:"notPorts,omitempty" validate:"omitempty,dive"`
+
+	// ServiceAccounts is an optional field that restricts the rule to only apply to traffic that originates from (or
+	// terminates at) a pod running as a matching service account.
+	ServiceAccounts *ServiceAccountMatch `json:"serviceAccounts,omitempty" validate:"omitempty"`
+}
+
+type ServiceMatch struct {
+	// Name specifies the name of a Kubernetes Service to match.
+	Name string `json:"name,omitempty" validate:"omitempty,name"`
+
+	// Namespace specifies the namespace of the given Service. If left empty, the rule
+	// will match within this policy's namespace.
+	Namespace string `json:"namespace,omitempty" validate:"omitempty,name"`
+}
+
+type ServiceAccountMatch struct {
+	// Names is an optional field that restricts the rule to only apply to traffic that originates from (or terminates
+	// at) a pod running as a service account whose name is in the list.
+	Names []string `json:"names,omitempty" validate:"omitempty"`
+
+	// Selector is an optional field that restricts the rule to only apply to traffic that originates from
+	// (or terminates at) a pod running as a service account that matches the given label selector.
+	// If both Names and Selector are specified then they are AND'ed.
+	Selector string `json:"selector,omitempty" validate:"omitempty,selector"`
+}
+
+type Action string
+
+const (
+	Allow Action = "Allow"
+	Deny  Action = "Deny"
+	Log   Action = "Log"
+	Pass  Action = "Pass"
+)
+
+type RuleMetadata struct {
+	// Annotations is a set of key/value pairs that give extra information about the rule.
+	Annotations map[string]string `json:"annotations,omitempty"`
+}
diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/profile.go b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/profile.go
new file mode 100644
index 000000000..cb296de7e
--- /dev/null
+++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/profile.go
@@ -0,0 +1,71 @@
+// Copyright (c) 2017,2021 Tigera, Inc. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+	KindProfile     = "Profile"
+	KindProfileList = "ProfileList"
+)
+
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ProfileList is a list of Profile objects.
+type ProfileList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	Items []Profile `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+type Profile struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	Spec ProfileSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+}
+
+// ProfileSpec contains the specification for a security Profile resource.
+type ProfileSpec struct {
+	// The ordered set of ingress rules. Each rule contains a set of packet match criteria and
+	// a corresponding action to apply.
+	Ingress []Rule `json:"ingress,omitempty" validate:"omitempty,dive"`
+	// The ordered set of egress rules. Each rule contains a set of packet match criteria and
+	// a corresponding action to apply.
+	Egress []Rule `json:"egress,omitempty" validate:"omitempty,dive"`
+	// An optional set of labels to apply to each endpoint (in addition to their own labels)
+	// referencing this profile. If labels configured on the endpoint have keys matching those
+	// labels inherited from the profile, the endpoint label values take precedence.
+	LabelsToApply map[string]string `json:"labelsToApply,omitempty" validate:"omitempty,labels"`
+}
+
+// NewProfile creates a new (zeroed) Profile struct with the TypeMeta initialised to the current
+// version.
+func NewProfile() *Profile {
+	return &Profile{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       KindProfile,
+			APIVersion: GroupVersionCurrent,
+		},
+	}
+}
diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/register.go b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/register.go
new file mode 100644
index 000000000..870be0cdb
--- /dev/null
+++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/register.go
@@ -0,0 +1,79 @@
+// Copyright (c) 2019-2022 Tigera, Inc. All rights reserved.
+
+package v3
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package.
+const GroupName = "projectcalico.org"
+
+// SchemeGroupVersion is the group version used to register these objects.
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v3"}
+var SchemeGroupVersionInternal = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+var (
+	SchemeBuilder      runtime.SchemeBuilder
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+	AllKnownTypes      = []runtime.Object{
+		&NetworkPolicy{},
+		&NetworkPolicyList{},
+		&GlobalNetworkPolicy{},
+		&GlobalNetworkPolicyList{},
+		&GlobalNetworkSet{},
+		&GlobalNetworkSetList{},
+		&HostEndpoint{},
+		&HostEndpointList{},
+		&IPPool{},
+		&IPPoolList{},
+		&IPReservation{},
+		&IPReservationList{},
+		&BGPConfiguration{},
+		&BGPConfigurationList{},
+		&BGPFilter{},
+		&BGPFilterList{},
+		&BGPPeer{},
+		&BGPPeerList{},
+		&Profile{},
+		&ProfileList{},
+		&FelixConfiguration{},
+		&FelixConfigurationList{},
+		&KubeControllersConfiguration{},
+		&KubeControllersConfigurationList{},
+		&ClusterInformation{},
+		&ClusterInformationList{},
+		&NetworkSet{},
+		&NetworkSetList{},
+		&CalicoNodeStatus{},
+		&CalicoNodeStatusList{},
+		&IPAMConfiguration{},
+		&IPAMConfigurationList{},
+		&BlockAffinity{},
+		&BlockAffinityList{},
+	}
+)
+
+func init() {
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+	localSchemeBuilder.Register(addKnownTypes, addConversionFuncs)
+}
+
+// addKnownTypes adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion, AllKnownTypes...)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource.
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/zz_generated.deepcopy.go b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/zz_generated.deepcopy.go
new file mode 100644
index 000000000..ec83d669f
--- /dev/null
+++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/zz_generated.deepcopy.go
@@ -0,0 +1,2844 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Copyright (c) 2023 Tigera, Inc. All rights reserved.
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v3
+
+import (
+	numorstring "github.com/projectcalico/api/pkg/lib/numorstring"
+	corev1 "k8s.io/api/core/v1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AutoHostEndpointConfig) DeepCopyInto(out *AutoHostEndpointConfig) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoHostEndpointConfig.
+func (in *AutoHostEndpointConfig) DeepCopy() *AutoHostEndpointConfig { + if in == nil { + return nil + } + out := new(AutoHostEndpointConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BGPConfiguration) DeepCopyInto(out *BGPConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPConfiguration. +func (in *BGPConfiguration) DeepCopy() *BGPConfiguration { + if in == nil { + return nil + } + out := new(BGPConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BGPConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BGPConfigurationList) DeepCopyInto(out *BGPConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BGPConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPConfigurationList. +func (in *BGPConfigurationList) DeepCopy() *BGPConfigurationList { + if in == nil { + return nil + } + out := new(BGPConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BGPConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BGPConfigurationSpec) DeepCopyInto(out *BGPConfigurationSpec) { + *out = *in + if in.NodeToNodeMeshEnabled != nil { + in, out := &in.NodeToNodeMeshEnabled, &out.NodeToNodeMeshEnabled + *out = new(bool) + **out = **in + } + if in.ASNumber != nil { + in, out := &in.ASNumber, &out.ASNumber + *out = new(numorstring.ASNumber) + **out = **in + } + if in.ServiceLoadBalancerIPs != nil { + in, out := &in.ServiceLoadBalancerIPs, &out.ServiceLoadBalancerIPs + *out = make([]ServiceLoadBalancerIPBlock, len(*in)) + copy(*out, *in) + } + if in.ServiceExternalIPs != nil { + in, out := &in.ServiceExternalIPs, &out.ServiceExternalIPs + *out = make([]ServiceExternalIPBlock, len(*in)) + copy(*out, *in) + } + if in.ServiceClusterIPs != nil { + in, out := &in.ServiceClusterIPs, &out.ServiceClusterIPs + *out = make([]ServiceClusterIPBlock, len(*in)) + copy(*out, *in) + } + if in.Communities != nil { + in, out := &in.Communities, &out.Communities + *out = make([]Community, len(*in)) + copy(*out, *in) + } + if in.PrefixAdvertisements != nil { + in, out := &in.PrefixAdvertisements, &out.PrefixAdvertisements + *out = make([]PrefixAdvertisement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NodeMeshPassword != nil { + in, out := &in.NodeMeshPassword, &out.NodeMeshPassword + *out = new(BGPPassword) + (*in).DeepCopyInto(*out) + } + if in.NodeMeshMaxRestartTime != nil { + in, out := &in.NodeMeshMaxRestartTime, &out.NodeMeshMaxRestartTime + *out = new(v1.Duration) + **out = **in + } + if in.BindMode != nil { + in, out := &in.BindMode, &out.BindMode + *out = new(BindMode) + **out = **in + } + if in.IgnoredInterfaces != nil { + in, out := &in.IgnoredInterfaces, &out.IgnoredInterfaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPConfigurationSpec. +func (in *BGPConfigurationSpec) DeepCopy() *BGPConfigurationSpec { + if in == nil { + return nil + } + out := new(BGPConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BGPDaemonStatus) DeepCopyInto(out *BGPDaemonStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPDaemonStatus. +func (in *BGPDaemonStatus) DeepCopy() *BGPDaemonStatus { + if in == nil { + return nil + } + out := new(BGPDaemonStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BGPFilter) DeepCopyInto(out *BGPFilter) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPFilter. +func (in *BGPFilter) DeepCopy() *BGPFilter { + if in == nil { + return nil + } + out := new(BGPFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BGPFilter) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BGPFilterList) DeepCopyInto(out *BGPFilterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BGPFilter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPFilterList. +func (in *BGPFilterList) DeepCopy() *BGPFilterList { + if in == nil { + return nil + } + out := new(BGPFilterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BGPFilterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BGPFilterRuleV4) DeepCopyInto(out *BGPFilterRuleV4) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPFilterRuleV4. +func (in *BGPFilterRuleV4) DeepCopy() *BGPFilterRuleV4 { + if in == nil { + return nil + } + out := new(BGPFilterRuleV4) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BGPFilterRuleV6) DeepCopyInto(out *BGPFilterRuleV6) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPFilterRuleV6. +func (in *BGPFilterRuleV6) DeepCopy() *BGPFilterRuleV6 { + if in == nil { + return nil + } + out := new(BGPFilterRuleV6) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BGPFilterSpec) DeepCopyInto(out *BGPFilterSpec) { + *out = *in + if in.ExportV4 != nil { + in, out := &in.ExportV4, &out.ExportV4 + *out = make([]BGPFilterRuleV4, len(*in)) + copy(*out, *in) + } + if in.ImportV4 != nil { + in, out := &in.ImportV4, &out.ImportV4 + *out = make([]BGPFilterRuleV4, len(*in)) + copy(*out, *in) + } + if in.ExportV6 != nil { + in, out := &in.ExportV6, &out.ExportV6 + *out = make([]BGPFilterRuleV6, len(*in)) + copy(*out, *in) + } + if in.ImportV6 != nil { + in, out := &in.ImportV6, &out.ImportV6 + *out = make([]BGPFilterRuleV6, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPFilterSpec. +func (in *BGPFilterSpec) DeepCopy() *BGPFilterSpec { + if in == nil { + return nil + } + out := new(BGPFilterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BGPPassword) DeepCopyInto(out *BGPPassword) { + *out = *in + if in.SecretKeyRef != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPPassword. +func (in *BGPPassword) DeepCopy() *BGPPassword { + if in == nil { + return nil + } + out := new(BGPPassword) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BGPPeer) DeepCopyInto(out *BGPPeer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPPeer. +func (in *BGPPeer) DeepCopy() *BGPPeer { + if in == nil { + return nil + } + out := new(BGPPeer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BGPPeer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BGPPeerList) DeepCopyInto(out *BGPPeerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BGPPeer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPPeerList. +func (in *BGPPeerList) DeepCopy() *BGPPeerList { + if in == nil { + return nil + } + out := new(BGPPeerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BGPPeerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BGPPeerSpec) DeepCopyInto(out *BGPPeerSpec) { + *out = *in + if in.Password != nil { + in, out := &in.Password, &out.Password + *out = new(BGPPassword) + (*in).DeepCopyInto(*out) + } + if in.MaxRestartTime != nil { + in, out := &in.MaxRestartTime, &out.MaxRestartTime + *out = new(v1.Duration) + **out = **in + } + if in.NumAllowedLocalASNumbers != nil { + in, out := &in.NumAllowedLocalASNumbers, &out.NumAllowedLocalASNumbers + *out = new(int32) + **out = **in + } + if in.TTLSecurity != nil { + in, out := &in.TTLSecurity, &out.TTLSecurity + *out = new(byte) + **out = **in + } + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BGPPeerSpec. +func (in *BGPPeerSpec) DeepCopy() *BGPPeerSpec { + if in == nil { + return nil + } + out := new(BGPPeerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlockAffinity) DeepCopyInto(out *BlockAffinity) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockAffinity. +func (in *BlockAffinity) DeepCopy() *BlockAffinity { + if in == nil { + return nil + } + out := new(BlockAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *BlockAffinity) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlockAffinityList) DeepCopyInto(out *BlockAffinityList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BlockAffinity, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockAffinityList. +func (in *BlockAffinityList) DeepCopy() *BlockAffinityList { + if in == nil { + return nil + } + out := new(BlockAffinityList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BlockAffinityList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlockAffinitySpec) DeepCopyInto(out *BlockAffinitySpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockAffinitySpec. +func (in *BlockAffinitySpec) DeepCopy() *BlockAffinitySpec { + if in == nil { + return nil + } + out := new(BlockAffinitySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CalicoNodeAgentStatus) DeepCopyInto(out *CalicoNodeAgentStatus) { + *out = *in + out.BIRDV4 = in.BIRDV4 + out.BIRDV6 = in.BIRDV6 + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CalicoNodeAgentStatus. +func (in *CalicoNodeAgentStatus) DeepCopy() *CalicoNodeAgentStatus { + if in == nil { + return nil + } + out := new(CalicoNodeAgentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CalicoNodeBGPRouteStatus) DeepCopyInto(out *CalicoNodeBGPRouteStatus) { + *out = *in + if in.RoutesV4 != nil { + in, out := &in.RoutesV4, &out.RoutesV4 + *out = make([]CalicoNodeRoute, len(*in)) + copy(*out, *in) + } + if in.RoutesV6 != nil { + in, out := &in.RoutesV6, &out.RoutesV6 + *out = make([]CalicoNodeRoute, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CalicoNodeBGPRouteStatus. +func (in *CalicoNodeBGPRouteStatus) DeepCopy() *CalicoNodeBGPRouteStatus { + if in == nil { + return nil + } + out := new(CalicoNodeBGPRouteStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CalicoNodeBGPStatus) DeepCopyInto(out *CalicoNodeBGPStatus) { + *out = *in + if in.PeersV4 != nil { + in, out := &in.PeersV4, &out.PeersV4 + *out = make([]CalicoNodePeer, len(*in)) + copy(*out, *in) + } + if in.PeersV6 != nil { + in, out := &in.PeersV6, &out.PeersV6 + *out = make([]CalicoNodePeer, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CalicoNodeBGPStatus. 
+func (in *CalicoNodeBGPStatus) DeepCopy() *CalicoNodeBGPStatus { + if in == nil { + return nil + } + out := new(CalicoNodeBGPStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CalicoNodePeer) DeepCopyInto(out *CalicoNodePeer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CalicoNodePeer. +func (in *CalicoNodePeer) DeepCopy() *CalicoNodePeer { + if in == nil { + return nil + } + out := new(CalicoNodePeer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CalicoNodeRoute) DeepCopyInto(out *CalicoNodeRoute) { + *out = *in + out.LearnedFrom = in.LearnedFrom + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CalicoNodeRoute. +func (in *CalicoNodeRoute) DeepCopy() *CalicoNodeRoute { + if in == nil { + return nil + } + out := new(CalicoNodeRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CalicoNodeRouteLearnedFrom) DeepCopyInto(out *CalicoNodeRouteLearnedFrom) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CalicoNodeRouteLearnedFrom. +func (in *CalicoNodeRouteLearnedFrom) DeepCopy() *CalicoNodeRouteLearnedFrom { + if in == nil { + return nil + } + out := new(CalicoNodeRouteLearnedFrom) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CalicoNodeStatus) DeepCopyInto(out *CalicoNodeStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CalicoNodeStatus. +func (in *CalicoNodeStatus) DeepCopy() *CalicoNodeStatus { + if in == nil { + return nil + } + out := new(CalicoNodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CalicoNodeStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CalicoNodeStatusList) DeepCopyInto(out *CalicoNodeStatusList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CalicoNodeStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CalicoNodeStatusList. +func (in *CalicoNodeStatusList) DeepCopy() *CalicoNodeStatusList { + if in == nil { + return nil + } + out := new(CalicoNodeStatusList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *CalicoNodeStatusList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CalicoNodeStatusSpec) DeepCopyInto(out *CalicoNodeStatusSpec) { + *out = *in + if in.Classes != nil { + in, out := &in.Classes, &out.Classes + *out = make([]NodeStatusClassType, len(*in)) + copy(*out, *in) + } + if in.UpdatePeriodSeconds != nil { + in, out := &in.UpdatePeriodSeconds, &out.UpdatePeriodSeconds + *out = new(uint32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CalicoNodeStatusSpec. +func (in *CalicoNodeStatusSpec) DeepCopy() *CalicoNodeStatusSpec { + if in == nil { + return nil + } + out := new(CalicoNodeStatusSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CalicoNodeStatusStatus) DeepCopyInto(out *CalicoNodeStatusStatus) { + *out = *in + in.LastUpdated.DeepCopyInto(&out.LastUpdated) + out.Agent = in.Agent + in.BGP.DeepCopyInto(&out.BGP) + in.Routes.DeepCopyInto(&out.Routes) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CalicoNodeStatusStatus. +func (in *CalicoNodeStatusStatus) DeepCopy() *CalicoNodeStatusStatus { + if in == nil { + return nil + } + out := new(CalicoNodeStatusStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterInformation) DeepCopyInto(out *ClusterInformation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInformation. +func (in *ClusterInformation) DeepCopy() *ClusterInformation { + if in == nil { + return nil + } + out := new(ClusterInformation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterInformation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterInformationList) DeepCopyInto(out *ClusterInformationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterInformation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInformationList. +func (in *ClusterInformationList) DeepCopy() *ClusterInformationList { + if in == nil { + return nil + } + out := new(ClusterInformationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterInformationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterInformationSpec) DeepCopyInto(out *ClusterInformationSpec) { + *out = *in + if in.DatastoreReady != nil { + in, out := &in.DatastoreReady, &out.DatastoreReady + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInformationSpec. +func (in *ClusterInformationSpec) DeepCopy() *ClusterInformationSpec { + if in == nil { + return nil + } + out := new(ClusterInformationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Community) DeepCopyInto(out *Community) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Community. +func (in *Community) DeepCopy() *Community { + if in == nil { + return nil + } + out := new(Community) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllersConfig) DeepCopyInto(out *ControllersConfig) { + *out = *in + if in.Node != nil { + in, out := &in.Node, &out.Node + *out = new(NodeControllerConfig) + (*in).DeepCopyInto(*out) + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(PolicyControllerConfig) + (*in).DeepCopyInto(*out) + } + if in.WorkloadEndpoint != nil { + in, out := &in.WorkloadEndpoint, &out.WorkloadEndpoint + *out = new(WorkloadEndpointControllerConfig) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccount != nil { + in, out := &in.ServiceAccount, &out.ServiceAccount + *out = new(ServiceAccountControllerConfig) + (*in).DeepCopyInto(*out) + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(NamespaceControllerConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllersConfig. +func (in *ControllersConfig) DeepCopy() *ControllersConfig { + if in == nil { + return nil + } + out := new(ControllersConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointPort) DeepCopyInto(out *EndpointPort) { + *out = *in + out.Protocol = in.Protocol + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort. +func (in *EndpointPort) DeepCopy() *EndpointPort { + if in == nil { + return nil + } + out := new(EndpointPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EntityRule) DeepCopyInto(out *EntityRule) { + *out = *in + if in.Nets != nil { + in, out := &in.Nets, &out.Nets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = new(ServiceMatch) + **out = **in + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]numorstring.Port, len(*in)) + copy(*out, *in) + } + if in.NotNets != nil { + in, out := &in.NotNets, &out.NotNets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NotPorts != nil { + in, out := &in.NotPorts, &out.NotPorts + *out = make([]numorstring.Port, len(*in)) + copy(*out, *in) + } + if in.ServiceAccounts != nil { + in, out := &in.ServiceAccounts, &out.ServiceAccounts + *out = new(ServiceAccountMatch) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EntityRule. +func (in *EntityRule) DeepCopy() *EntityRule { + if in == nil { + return nil + } + out := new(EntityRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FelixConfiguration) DeepCopyInto(out *FelixConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FelixConfiguration. +func (in *FelixConfiguration) DeepCopy() *FelixConfiguration { + if in == nil { + return nil + } + out := new(FelixConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FelixConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FelixConfigurationList) DeepCopyInto(out *FelixConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FelixConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FelixConfigurationList. +func (in *FelixConfigurationList) DeepCopy() *FelixConfigurationList { + if in == nil { + return nil + } + out := new(FelixConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FelixConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FelixConfigurationSpec) DeepCopyInto(out *FelixConfigurationSpec) { + *out = *in + if in.UseInternalDataplaneDriver != nil { + in, out := &in.UseInternalDataplaneDriver, &out.UseInternalDataplaneDriver + *out = new(bool) + **out = **in + } + if in.DataplaneWatchdogTimeout != nil { + in, out := &in.DataplaneWatchdogTimeout, &out.DataplaneWatchdogTimeout + *out = new(v1.Duration) + **out = **in + } + if in.IPv6Support != nil { + in, out := &in.IPv6Support, &out.IPv6Support + *out = new(bool) + **out = **in + } + if in.RouteRefreshInterval != nil { + in, out := &in.RouteRefreshInterval, &out.RouteRefreshInterval + *out = new(v1.Duration) + **out = **in + } + if in.InterfaceRefreshInterval != nil { + in, out := &in.InterfaceRefreshInterval, &out.InterfaceRefreshInterval + *out = new(v1.Duration) + **out = **in + } + if in.IptablesRefreshInterval != nil { + in, out := &in.IptablesRefreshInterval, &out.IptablesRefreshInterval + *out = new(v1.Duration) + **out = **in + } + if in.IptablesPostWriteCheckInterval != nil { + in, out := &in.IptablesPostWriteCheckInterval, &out.IptablesPostWriteCheckInterval + *out = new(v1.Duration) + **out = **in + } + if in.IptablesLockTimeout != nil { + in, out := &in.IptablesLockTimeout, &out.IptablesLockTimeout + *out = new(v1.Duration) + **out = **in + } + if in.IptablesLockProbeInterval != nil { + in, out := &in.IptablesLockProbeInterval, &out.IptablesLockProbeInterval + *out = new(v1.Duration) + **out = **in + } + if in.IpsetsRefreshInterval != nil { + in, out := &in.IpsetsRefreshInterval, &out.IpsetsRefreshInterval + *out = new(v1.Duration) + **out = **in + } + if in.MaxIpsetSize != nil { + in, out := &in.MaxIpsetSize, &out.MaxIpsetSize + *out = new(int) + **out = **in + } + if in.IptablesBackend != nil { + in, out := &in.IptablesBackend, &out.IptablesBackend + *out = new(IptablesBackend) + **out = **in + } + if in.XDPRefreshInterval != nil { + in, out := &in.XDPRefreshInterval, &out.XDPRefreshInterval + *out = new(v1.Duration) + **out = **in + } + if in.NetlinkTimeout != nil { + in, out := &in.NetlinkTimeout, &out.NetlinkTimeout + *out = new(v1.Duration) + **out = **in + } + if in.MetadataPort != nil { + in, out := &in.MetadataPort, &out.MetadataPort + *out = new(int) + **out = **in + } + if in.IPIPEnabled != nil { + in, out := &in.IPIPEnabled, &out.IPIPEnabled + *out = new(bool) + **out = **in + } + if in.IPIPMTU != nil { + in, out := &in.IPIPMTU, &out.IPIPMTU + *out = new(int) + **out = **in + } + if in.VXLANEnabled != nil { + in, out := &in.VXLANEnabled, &out.VXLANEnabled + *out = new(bool) + **out = **in + } + if in.VXLANMTU != nil { + in, out := &in.VXLANMTU, &out.VXLANMTU + *out = new(int) + **out = **in + } + if in.VXLANMTUV6 != nil { + in, out := &in.VXLANMTUV6, &out.VXLANMTUV6 + *out = new(int) + **out = **in + } + if in.VXLANPort != nil { + in, out := &in.VXLANPort, &out.VXLANPort + *out = new(int) + **out = **in + } + if in.VXLANVNI != nil { + in, out := &in.VXLANVNI, &out.VXLANVNI + *out = new(int) + **out = **in + } + if in.AllowVXLANPacketsFromWorkloads != nil { + in, out := &in.AllowVXLANPacketsFromWorkloads, &out.AllowVXLANPacketsFromWorkloads + *out = new(bool) + **out = **in + } + if in.AllowIPIPPacketsFromWorkloads != nil { + in, out := &in.AllowIPIPPacketsFromWorkloads, &out.AllowIPIPPacketsFromWorkloads + *out = new(bool) + **out = **in + } + if in.ReportingInterval != nil { + in, out := &in.ReportingInterval, &out.ReportingInterval + *out = new(v1.Duration) + **out = **in + } + if in.ReportingTTL != nil { + in, out := 
&in.ReportingTTL, &out.ReportingTTL + *out = new(v1.Duration) + **out = **in + } + if in.EndpointReportingEnabled != nil { + in, out := &in.EndpointReportingEnabled, &out.EndpointReportingEnabled + *out = new(bool) + **out = **in + } + if in.EndpointReportingDelay != nil { + in, out := &in.EndpointReportingDelay, &out.EndpointReportingDelay + *out = new(v1.Duration) + **out = **in + } + if in.IptablesMarkMask != nil { + in, out := &in.IptablesMarkMask, &out.IptablesMarkMask + *out = new(uint32) + **out = **in + } + if in.DisableConntrackInvalidCheck != nil { + in, out := &in.DisableConntrackInvalidCheck, &out.DisableConntrackInvalidCheck + *out = new(bool) + **out = **in + } + if in.HealthEnabled != nil { + in, out := &in.HealthEnabled, &out.HealthEnabled + *out = new(bool) + **out = **in + } + if in.HealthHost != nil { + in, out := &in.HealthHost, &out.HealthHost + *out = new(string) + **out = **in + } + if in.HealthPort != nil { + in, out := &in.HealthPort, &out.HealthPort + *out = new(int) + **out = **in + } + if in.HealthTimeoutOverrides != nil { + in, out := &in.HealthTimeoutOverrides, &out.HealthTimeoutOverrides + *out = make([]HealthTimeoutOverride, len(*in)) + copy(*out, *in) + } + if in.PrometheusMetricsEnabled != nil { + in, out := &in.PrometheusMetricsEnabled, &out.PrometheusMetricsEnabled + *out = new(bool) + **out = **in + } + if in.PrometheusMetricsPort != nil { + in, out := &in.PrometheusMetricsPort, &out.PrometheusMetricsPort + *out = new(int) + **out = **in + } + if in.PrometheusGoMetricsEnabled != nil { + in, out := &in.PrometheusGoMetricsEnabled, &out.PrometheusGoMetricsEnabled + *out = new(bool) + **out = **in + } + if in.PrometheusProcessMetricsEnabled != nil { + in, out := &in.PrometheusProcessMetricsEnabled, &out.PrometheusProcessMetricsEnabled + *out = new(bool) + **out = **in + } + if in.PrometheusWireGuardMetricsEnabled != nil { + in, out := &in.PrometheusWireGuardMetricsEnabled, &out.PrometheusWireGuardMetricsEnabled + *out = new(bool) + **out = **in + } + if in.FailsafeInboundHostPorts != nil { + in, out := &in.FailsafeInboundHostPorts, &out.FailsafeInboundHostPorts + *out = new([]ProtoPort) + if **in != nil { + in, out := *in, *out + *out = make([]ProtoPort, len(*in)) + copy(*out, *in) + } + } + if in.FailsafeOutboundHostPorts != nil { + in, out := &in.FailsafeOutboundHostPorts, &out.FailsafeOutboundHostPorts + *out = new([]ProtoPort) + if **in != nil { + in, out := *in, *out + *out = make([]ProtoPort, len(*in)) + copy(*out, *in) + } + } + if in.KubeNodePortRanges != nil { + in, out := &in.KubeNodePortRanges, &out.KubeNodePortRanges + *out = new([]numorstring.Port) + if **in != nil { + in, out := *in, *out + *out = make([]numorstring.Port, len(*in)) + copy(*out, *in) + } + } + if in.UsageReportingEnabled != nil { + in, out := &in.UsageReportingEnabled, &out.UsageReportingEnabled + *out = new(bool) + **out = **in + } + if in.UsageReportingInitialDelay != nil { + in, out := &in.UsageReportingInitialDelay, &out.UsageReportingInitialDelay + *out = new(v1.Duration) + **out = **in + } + if in.UsageReportingInterval != nil { + in, out := &in.UsageReportingInterval, &out.UsageReportingInterval + *out = new(v1.Duration) + **out = **in + } + if in.NATPortRange != nil { + in, out := &in.NATPortRange, &out.NATPortRange + *out = new(numorstring.Port) + **out = **in + } + if in.DeviceRouteProtocol != nil { + in, out := &in.DeviceRouteProtocol, &out.DeviceRouteProtocol + *out = new(int) + **out = **in + } + if in.RemoveExternalRoutes != nil { + in, out := 
&in.RemoveExternalRoutes, &out.RemoveExternalRoutes + *out = new(bool) + **out = **in + } + if in.ExternalNodesCIDRList != nil { + in, out := &in.ExternalNodesCIDRList, &out.ExternalNodesCIDRList + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } + if in.DebugDisableLogDropping != nil { + in, out := &in.DebugDisableLogDropping, &out.DebugDisableLogDropping + *out = new(bool) + **out = **in + } + if in.DebugSimulateCalcGraphHangAfter != nil { + in, out := &in.DebugSimulateCalcGraphHangAfter, &out.DebugSimulateCalcGraphHangAfter + *out = new(v1.Duration) + **out = **in + } + if in.DebugSimulateDataplaneHangAfter != nil { + in, out := &in.DebugSimulateDataplaneHangAfter, &out.DebugSimulateDataplaneHangAfter + *out = new(v1.Duration) + **out = **in + } + if in.SidecarAccelerationEnabled != nil { + in, out := &in.SidecarAccelerationEnabled, &out.SidecarAccelerationEnabled + *out = new(bool) + **out = **in + } + if in.XDPEnabled != nil { + in, out := &in.XDPEnabled, &out.XDPEnabled + *out = new(bool) + **out = **in + } + if in.GenericXDPEnabled != nil { + in, out := &in.GenericXDPEnabled, &out.GenericXDPEnabled + *out = new(bool) + **out = **in + } + if in.BPFEnabled != nil { + in, out := &in.BPFEnabled, &out.BPFEnabled + *out = new(bool) + **out = **in + } + if in.BPFDisableUnprivileged != nil { + in, out := &in.BPFDisableUnprivileged, &out.BPFDisableUnprivileged + *out = new(bool) + **out = **in + } + if in.BPFConnectTimeLoadBalancingEnabled != nil { + in, out := &in.BPFConnectTimeLoadBalancingEnabled, &out.BPFConnectTimeLoadBalancingEnabled + *out = new(bool) + **out = **in + } + if in.BPFDSROptoutCIDRs != nil { + in, out := &in.BPFDSROptoutCIDRs, &out.BPFDSROptoutCIDRs + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } + if in.BPFExtToServiceConnmark != nil { + in, out := &in.BPFExtToServiceConnmark, &out.BPFExtToServiceConnmark + *out = new(int) + **out = **in + } + if in.BPFKubeProxyIptablesCleanupEnabled != nil { + in, out := &in.BPFKubeProxyIptablesCleanupEnabled, &out.BPFKubeProxyIptablesCleanupEnabled + *out = new(bool) + **out = **in + } + if in.BPFKubeProxyMinSyncPeriod != nil { + in, out := &in.BPFKubeProxyMinSyncPeriod, &out.BPFKubeProxyMinSyncPeriod + *out = new(v1.Duration) + **out = **in + } + if in.BPFKubeProxyEndpointSlicesEnabled != nil { + in, out := &in.BPFKubeProxyEndpointSlicesEnabled, &out.BPFKubeProxyEndpointSlicesEnabled + *out = new(bool) + **out = **in + } + if in.BPFPSNATPorts != nil { + in, out := &in.BPFPSNATPorts, &out.BPFPSNATPorts + *out = new(numorstring.Port) + **out = **in + } + if in.BPFMapSizeNATFrontend != nil { + in, out := &in.BPFMapSizeNATFrontend, &out.BPFMapSizeNATFrontend + *out = new(int) + **out = **in + } + if in.BPFMapSizeNATBackend != nil { + in, out := &in.BPFMapSizeNATBackend, &out.BPFMapSizeNATBackend + *out = new(int) + **out = **in + } + if in.BPFMapSizeNATAffinity != nil { + in, out := &in.BPFMapSizeNATAffinity, &out.BPFMapSizeNATAffinity + *out = new(int) + **out = **in + } + if in.BPFMapSizeRoute != nil { + in, out := &in.BPFMapSizeRoute, &out.BPFMapSizeRoute + *out = new(int) + **out = **in + } + if in.BPFMapSizeConntrack != nil { + in, out := &in.BPFMapSizeConntrack, &out.BPFMapSizeConntrack + *out = new(int) + **out = **in + } + if in.BPFMapSizeIPSets != nil { + in, out := &in.BPFMapSizeIPSets, &out.BPFMapSizeIPSets + *out = new(int) + **out = **in + } + if in.BPFMapSizeIfState != nil { + 
in, out := &in.BPFMapSizeIfState, &out.BPFMapSizeIfState + *out = new(int) + **out = **in + } + if in.BPFHostConntrackBypass != nil { + in, out := &in.BPFHostConntrackBypass, &out.BPFHostConntrackBypass + *out = new(bool) + **out = **in + } + if in.BPFPolicyDebugEnabled != nil { + in, out := &in.BPFPolicyDebugEnabled, &out.BPFPolicyDebugEnabled + *out = new(bool) + **out = **in + } + if in.RouteTableRanges != nil { + in, out := &in.RouteTableRanges, &out.RouteTableRanges + *out = new(RouteTableRanges) + if **in != nil { + in, out := *in, *out + *out = make([]RouteTableIDRange, len(*in)) + copy(*out, *in) + } + } + if in.RouteTableRange != nil { + in, out := &in.RouteTableRange, &out.RouteTableRange + *out = new(RouteTableRange) + **out = **in + } + if in.RouteSyncDisabled != nil { + in, out := &in.RouteSyncDisabled, &out.RouteSyncDisabled + *out = new(bool) + **out = **in + } + if in.WireguardEnabled != nil { + in, out := &in.WireguardEnabled, &out.WireguardEnabled + *out = new(bool) + **out = **in + } + if in.WireguardEnabledV6 != nil { + in, out := &in.WireguardEnabledV6, &out.WireguardEnabledV6 + *out = new(bool) + **out = **in + } + if in.WireguardListeningPort != nil { + in, out := &in.WireguardListeningPort, &out.WireguardListeningPort + *out = new(int) + **out = **in + } + if in.WireguardListeningPortV6 != nil { + in, out := &in.WireguardListeningPortV6, &out.WireguardListeningPortV6 + *out = new(int) + **out = **in + } + if in.WireguardRoutingRulePriority != nil { + in, out := &in.WireguardRoutingRulePriority, &out.WireguardRoutingRulePriority + *out = new(int) + **out = **in + } + if in.WireguardMTU != nil { + in, out := &in.WireguardMTU, &out.WireguardMTU + *out = new(int) + **out = **in + } + if in.WireguardMTUV6 != nil { + in, out := &in.WireguardMTUV6, &out.WireguardMTUV6 + *out = new(int) + **out = **in + } + if in.WireguardHostEncryptionEnabled != nil { + in, out := &in.WireguardHostEncryptionEnabled, &out.WireguardHostEncryptionEnabled + *out = new(bool) + **out = **in + } + if in.WireguardPersistentKeepAlive != nil { + in, out := &in.WireguardPersistentKeepAlive, &out.WireguardPersistentKeepAlive + *out = new(v1.Duration) + **out = **in + } + if in.AWSSrcDstCheck != nil { + in, out := &in.AWSSrcDstCheck, &out.AWSSrcDstCheck + *out = new(AWSSrcDstCheckOption) + **out = **in + } + if in.FloatingIPs != nil { + in, out := &in.FloatingIPs, &out.FloatingIPs + *out = new(FloatingIPType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FelixConfigurationSpec. +func (in *FelixConfigurationSpec) DeepCopy() *FelixConfigurationSpec { + if in == nil { + return nil + } + out := new(FelixConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalNetworkPolicy) DeepCopyInto(out *GlobalNetworkPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalNetworkPolicy. +func (in *GlobalNetworkPolicy) DeepCopy() *GlobalNetworkPolicy { + if in == nil { + return nil + } + out := new(GlobalNetworkPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *GlobalNetworkPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalNetworkPolicyList) DeepCopyInto(out *GlobalNetworkPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GlobalNetworkPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalNetworkPolicyList. +func (in *GlobalNetworkPolicyList) DeepCopy() *GlobalNetworkPolicyList { + if in == nil { + return nil + } + out := new(GlobalNetworkPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GlobalNetworkPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalNetworkPolicySpec) DeepCopyInto(out *GlobalNetworkPolicySpec) { + *out = *in + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]Rule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Egress != nil { + in, out := &in.Egress, &out.Egress + *out = make([]Rule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Types != nil { + in, out := &in.Types, &out.Types + *out = make([]PolicyType, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalNetworkPolicySpec. +func (in *GlobalNetworkPolicySpec) DeepCopy() *GlobalNetworkPolicySpec { + if in == nil { + return nil + } + out := new(GlobalNetworkPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalNetworkSet) DeepCopyInto(out *GlobalNetworkSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalNetworkSet. +func (in *GlobalNetworkSet) DeepCopy() *GlobalNetworkSet { + if in == nil { + return nil + } + out := new(GlobalNetworkSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GlobalNetworkSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GlobalNetworkSetList) DeepCopyInto(out *GlobalNetworkSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]GlobalNetworkSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalNetworkSetList. +func (in *GlobalNetworkSetList) DeepCopy() *GlobalNetworkSetList { + if in == nil { + return nil + } + out := new(GlobalNetworkSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GlobalNetworkSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlobalNetworkSetSpec) DeepCopyInto(out *GlobalNetworkSetSpec) { + *out = *in + if in.Nets != nil { + in, out := &in.Nets, &out.Nets + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalNetworkSetSpec. +func (in *GlobalNetworkSetSpec) DeepCopy() *GlobalNetworkSetSpec { + if in == nil { + return nil + } + out := new(GlobalNetworkSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPMatch) DeepCopyInto(out *HTTPMatch) { + *out = *in + if in.Methods != nil { + in, out := &in.Methods, &out.Methods + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]HTTPPath, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPMatch. +func (in *HTTPMatch) DeepCopy() *HTTPMatch { + if in == nil { + return nil + } + out := new(HTTPMatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPPath) DeepCopyInto(out *HTTPPath) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPPath. +func (in *HTTPPath) DeepCopy() *HTTPPath { + if in == nil { + return nil + } + out := new(HTTPPath) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthTimeoutOverride) DeepCopyInto(out *HealthTimeoutOverride) { + *out = *in + out.Timeout = in.Timeout + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthTimeoutOverride. +func (in *HealthTimeoutOverride) DeepCopy() *HealthTimeoutOverride { + if in == nil { + return nil + } + out := new(HealthTimeoutOverride) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HostEndpoint) DeepCopyInto(out *HostEndpoint) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostEndpoint. +func (in *HostEndpoint) DeepCopy() *HostEndpoint { + if in == nil { + return nil + } + out := new(HostEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HostEndpoint) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostEndpointList) DeepCopyInto(out *HostEndpointList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HostEndpoint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostEndpointList. +func (in *HostEndpointList) DeepCopy() *HostEndpointList { + if in == nil { + return nil + } + out := new(HostEndpointList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HostEndpointList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostEndpointSpec) DeepCopyInto(out *HostEndpointSpec) { + *out = *in + if in.ExpectedIPs != nil { + in, out := &in.ExpectedIPs, &out.ExpectedIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Profiles != nil { + in, out := &in.Profiles, &out.Profiles + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]EndpointPort, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostEndpointSpec. +func (in *HostEndpointSpec) DeepCopy() *HostEndpointSpec { + if in == nil { + return nil + } + out := new(HostEndpointSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ICMPFields) DeepCopyInto(out *ICMPFields) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(int) + **out = **in + } + if in.Code != nil { + in, out := &in.Code, &out.Code + *out = new(int) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ICMPFields. +func (in *ICMPFields) DeepCopy() *ICMPFields { + if in == nil { + return nil + } + out := new(ICMPFields) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IPAMConfiguration) DeepCopyInto(out *IPAMConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMConfiguration. +func (in *IPAMConfiguration) DeepCopy() *IPAMConfiguration { + if in == nil { + return nil + } + out := new(IPAMConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IPAMConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAMConfigurationList) DeepCopyInto(out *IPAMConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IPAMConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMConfigurationList. +func (in *IPAMConfigurationList) DeepCopy() *IPAMConfigurationList { + if in == nil { + return nil + } + out := new(IPAMConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IPAMConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPAMConfigurationSpec) DeepCopyInto(out *IPAMConfigurationSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAMConfigurationSpec. +func (in *IPAMConfigurationSpec) DeepCopy() *IPAMConfigurationSpec { + if in == nil { + return nil + } + out := new(IPAMConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPIPConfiguration) DeepCopyInto(out *IPIPConfiguration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPIPConfiguration. +func (in *IPIPConfiguration) DeepCopy() *IPIPConfiguration { + if in == nil { + return nil + } + out := new(IPIPConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPPool) DeepCopyInto(out *IPPool) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPPool. +func (in *IPPool) DeepCopy() *IPPool { + if in == nil { + return nil + } + out := new(IPPool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *IPPool) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPPoolList) DeepCopyInto(out *IPPoolList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IPPool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPPoolList. +func (in *IPPoolList) DeepCopy() *IPPoolList { + if in == nil { + return nil + } + out := new(IPPoolList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IPPoolList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPPoolSpec) DeepCopyInto(out *IPPoolSpec) { + *out = *in + if in.IPIP != nil { + in, out := &in.IPIP, &out.IPIP + *out = new(IPIPConfiguration) + **out = **in + } + if in.AllowedUses != nil { + in, out := &in.AllowedUses, &out.AllowedUses + *out = make([]IPPoolAllowedUse, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPPoolSpec. +func (in *IPPoolSpec) DeepCopy() *IPPoolSpec { + if in == nil { + return nil + } + out := new(IPPoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPReservation) DeepCopyInto(out *IPReservation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPReservation. +func (in *IPReservation) DeepCopy() *IPReservation { + if in == nil { + return nil + } + out := new(IPReservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IPReservation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPReservationList) DeepCopyInto(out *IPReservationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IPReservation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPReservationList. +func (in *IPReservationList) DeepCopy() *IPReservationList { + if in == nil { + return nil + } + out := new(IPReservationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *IPReservationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPReservationSpec) DeepCopyInto(out *IPReservationSpec) { + *out = *in + if in.ReservedCIDRs != nil { + in, out := &in.ReservedCIDRs, &out.ReservedCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPReservationSpec. +func (in *IPReservationSpec) DeepCopy() *IPReservationSpec { + if in == nil { + return nil + } + out := new(IPReservationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeControllersConfiguration) DeepCopyInto(out *KubeControllersConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllersConfiguration. +func (in *KubeControllersConfiguration) DeepCopy() *KubeControllersConfiguration { + if in == nil { + return nil + } + out := new(KubeControllersConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeControllersConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeControllersConfigurationList) DeepCopyInto(out *KubeControllersConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KubeControllersConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllersConfigurationList. +func (in *KubeControllersConfigurationList) DeepCopy() *KubeControllersConfigurationList { + if in == nil { + return nil + } + out := new(KubeControllersConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeControllersConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeControllersConfigurationSpec) DeepCopyInto(out *KubeControllersConfigurationSpec) { + *out = *in + if in.EtcdV3CompactionPeriod != nil { + in, out := &in.EtcdV3CompactionPeriod, &out.EtcdV3CompactionPeriod + *out = new(v1.Duration) + **out = **in + } + if in.PrometheusMetricsPort != nil { + in, out := &in.PrometheusMetricsPort, &out.PrometheusMetricsPort + *out = new(int) + **out = **in + } + in.Controllers.DeepCopyInto(&out.Controllers) + if in.DebugProfilePort != nil { + in, out := &in.DebugProfilePort, &out.DebugProfilePort + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllersConfigurationSpec. +func (in *KubeControllersConfigurationSpec) DeepCopy() *KubeControllersConfigurationSpec { + if in == nil { + return nil + } + out := new(KubeControllersConfigurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeControllersConfigurationStatus) DeepCopyInto(out *KubeControllersConfigurationStatus) { + *out = *in + in.RunningConfig.DeepCopyInto(&out.RunningConfig) + if in.EnvironmentVars != nil { + in, out := &in.EnvironmentVars, &out.EnvironmentVars + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllersConfigurationStatus. +func (in *KubeControllersConfigurationStatus) DeepCopy() *KubeControllersConfigurationStatus { + if in == nil { + return nil + } + out := new(KubeControllersConfigurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceControllerConfig) DeepCopyInto(out *NamespaceControllerConfig) { + *out = *in + if in.ReconcilerPeriod != nil { + in, out := &in.ReconcilerPeriod, &out.ReconcilerPeriod + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceControllerConfig. +func (in *NamespaceControllerConfig) DeepCopy() *NamespaceControllerConfig { + if in == nil { + return nil + } + out := new(NamespaceControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicy) DeepCopyInto(out *NetworkPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicy. +func (in *NetworkPolicy) DeepCopy() *NetworkPolicy { + if in == nil { + return nil + } + out := new(NetworkPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NetworkPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyList. +func (in *NetworkPolicyList) DeepCopy() *NetworkPolicyList { + if in == nil { + return nil + } + out := new(NetworkPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicySpec) DeepCopyInto(out *NetworkPolicySpec) { + *out = *in + if in.Order != nil { + in, out := &in.Order, &out.Order + *out = new(float64) + **out = **in + } + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]Rule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Egress != nil { + in, out := &in.Egress, &out.Egress + *out = make([]Rule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Types != nil { + in, out := &in.Types, &out.Types + *out = make([]PolicyType, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicySpec. +func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec { + if in == nil { + return nil + } + out := new(NetworkPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSet) DeepCopyInto(out *NetworkSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSet. +func (in *NetworkSet) DeepCopy() *NetworkSet { + if in == nil { + return nil + } + out := new(NetworkSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSetList) DeepCopyInto(out *NetworkSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NetworkSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSetList. +func (in *NetworkSetList) DeepCopy() *NetworkSetList { + if in == nil { + return nil + } + out := new(NetworkSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *NetworkSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSetSpec) DeepCopyInto(out *NetworkSetSpec) { + *out = *in + if in.Nets != nil { + in, out := &in.Nets, &out.Nets + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSetSpec. +func (in *NetworkSetSpec) DeepCopy() *NetworkSetSpec { + if in == nil { + return nil + } + out := new(NetworkSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeControllerConfig) DeepCopyInto(out *NodeControllerConfig) { + *out = *in + if in.ReconcilerPeriod != nil { + in, out := &in.ReconcilerPeriod, &out.ReconcilerPeriod + *out = new(v1.Duration) + **out = **in + } + if in.HostEndpoint != nil { + in, out := &in.HostEndpoint, &out.HostEndpoint + *out = new(AutoHostEndpointConfig) + **out = **in + } + if in.LeakGracePeriod != nil { + in, out := &in.LeakGracePeriod, &out.LeakGracePeriod + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeControllerConfig. +func (in *NodeControllerConfig) DeepCopy() *NodeControllerConfig { + if in == nil { + return nil + } + out := new(NodeControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyControllerConfig) DeepCopyInto(out *PolicyControllerConfig) { + *out = *in + if in.ReconcilerPeriod != nil { + in, out := &in.ReconcilerPeriod, &out.ReconcilerPeriod + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyControllerConfig. +func (in *PolicyControllerConfig) DeepCopy() *PolicyControllerConfig { + if in == nil { + return nil + } + out := new(PolicyControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrefixAdvertisement) DeepCopyInto(out *PrefixAdvertisement) { + *out = *in + if in.Communities != nil { + in, out := &in.Communities, &out.Communities + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrefixAdvertisement. +func (in *PrefixAdvertisement) DeepCopy() *PrefixAdvertisement { + if in == nil { + return nil + } + out := new(PrefixAdvertisement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Profile) DeepCopyInto(out *Profile) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Profile. 
+func (in *Profile) DeepCopy() *Profile { + if in == nil { + return nil + } + out := new(Profile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Profile) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProfileList) DeepCopyInto(out *ProfileList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Profile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProfileList. +func (in *ProfileList) DeepCopy() *ProfileList { + if in == nil { + return nil + } + out := new(ProfileList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProfileList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProfileSpec) DeepCopyInto(out *ProfileSpec) { + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]Rule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Egress != nil { + in, out := &in.Egress, &out.Egress + *out = make([]Rule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LabelsToApply != nil { + in, out := &in.LabelsToApply, &out.LabelsToApply + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProfileSpec. +func (in *ProfileSpec) DeepCopy() *ProfileSpec { + if in == nil { + return nil + } + out := new(ProfileSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProtoPort) DeepCopyInto(out *ProtoPort) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProtoPort. +func (in *ProtoPort) DeepCopy() *ProtoPort { + if in == nil { + return nil + } + out := new(ProtoPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteTableIDRange) DeepCopyInto(out *RouteTableIDRange) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteTableIDRange. +func (in *RouteTableIDRange) DeepCopy() *RouteTableIDRange { + if in == nil { + return nil + } + out := new(RouteTableIDRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteTableRange) DeepCopyInto(out *RouteTableRange) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteTableRange. 
+func (in *RouteTableRange) DeepCopy() *RouteTableRange { + if in == nil { + return nil + } + out := new(RouteTableRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in RouteTableRanges) DeepCopyInto(out *RouteTableRanges) { + { + in := &in + *out = make(RouteTableRanges, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteTableRanges. +func (in RouteTableRanges) DeepCopy() RouteTableRanges { + if in == nil { + return nil + } + out := new(RouteTableRanges) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Rule) DeepCopyInto(out *Rule) { + *out = *in + if in.IPVersion != nil { + in, out := &in.IPVersion, &out.IPVersion + *out = new(int) + **out = **in + } + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(numorstring.Protocol) + **out = **in + } + if in.ICMP != nil { + in, out := &in.ICMP, &out.ICMP + *out = new(ICMPFields) + (*in).DeepCopyInto(*out) + } + if in.NotProtocol != nil { + in, out := &in.NotProtocol, &out.NotProtocol + *out = new(numorstring.Protocol) + **out = **in + } + if in.NotICMP != nil { + in, out := &in.NotICMP, &out.NotICMP + *out = new(ICMPFields) + (*in).DeepCopyInto(*out) + } + in.Source.DeepCopyInto(&out.Source) + in.Destination.DeepCopyInto(&out.Destination) + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = new(HTTPMatch) + (*in).DeepCopyInto(*out) + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(RuleMetadata) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. +func (in *Rule) DeepCopy() *Rule { + if in == nil { + return nil + } + out := new(Rule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleMetadata) DeepCopyInto(out *RuleMetadata) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleMetadata. +func (in *RuleMetadata) DeepCopy() *RuleMetadata { + if in == nil { + return nil + } + out := new(RuleMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountControllerConfig) DeepCopyInto(out *ServiceAccountControllerConfig) { + *out = *in + if in.ReconcilerPeriod != nil { + in, out := &in.ReconcilerPeriod, &out.ReconcilerPeriod + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountControllerConfig. +func (in *ServiceAccountControllerConfig) DeepCopy() *ServiceAccountControllerConfig { + if in == nil { + return nil + } + out := new(ServiceAccountControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceAccountMatch) DeepCopyInto(out *ServiceAccountMatch) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountMatch. +func (in *ServiceAccountMatch) DeepCopy() *ServiceAccountMatch { + if in == nil { + return nil + } + out := new(ServiceAccountMatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceClusterIPBlock) DeepCopyInto(out *ServiceClusterIPBlock) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceClusterIPBlock. +func (in *ServiceClusterIPBlock) DeepCopy() *ServiceClusterIPBlock { + if in == nil { + return nil + } + out := new(ServiceClusterIPBlock) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceExternalIPBlock) DeepCopyInto(out *ServiceExternalIPBlock) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceExternalIPBlock. +func (in *ServiceExternalIPBlock) DeepCopy() *ServiceExternalIPBlock { + if in == nil { + return nil + } + out := new(ServiceExternalIPBlock) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceLoadBalancerIPBlock) DeepCopyInto(out *ServiceLoadBalancerIPBlock) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceLoadBalancerIPBlock. +func (in *ServiceLoadBalancerIPBlock) DeepCopy() *ServiceLoadBalancerIPBlock { + if in == nil { + return nil + } + out := new(ServiceLoadBalancerIPBlock) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceMatch) DeepCopyInto(out *ServiceMatch) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceMatch. +func (in *ServiceMatch) DeepCopy() *ServiceMatch { + if in == nil { + return nil + } + out := new(ServiceMatch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkloadEndpointControllerConfig) DeepCopyInto(out *WorkloadEndpointControllerConfig) { + *out = *in + if in.ReconcilerPeriod != nil { + in, out := &in.ReconcilerPeriod, &out.ReconcilerPeriod + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkloadEndpointControllerConfig. 
+func (in *WorkloadEndpointControllerConfig) DeepCopy() *WorkloadEndpointControllerConfig { + if in == nil { + return nil + } + out := new(WorkloadEndpointControllerConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/zz_generated.defaults.go b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/zz_generated.defaults.go new file mode 100644 index 000000000..c5cf25862 --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/apis/projectcalico/v3/zz_generated.defaults.go @@ -0,0 +1,19 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Copyright (c) 2023 Tigera, Inc. All rights reserved. + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v3 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/github.com/projectcalico/api/pkg/lib/numorstring/asnumber.go b/vendor/github.com/projectcalico/api/pkg/lib/numorstring/asnumber.go new file mode 100644 index 000000000..9ff706d62 --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/lib/numorstring/asnumber.go @@ -0,0 +1,73 @@ +// Copyright (c) 2016 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package numorstring + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" +) + +type ASNumber uint32 + +// ASNumberFromString creates an ASNumber struct from a string value. The +// string value may simply be a number or may be the ASN in dotted notation. +func ASNumberFromString(s string) (ASNumber, error) { + if num, err := strconv.ParseUint(s, 10, 32); err == nil { + return ASNumber(num), nil + } + + parts := strings.Split(s, ".") + if len(parts) != 2 { + msg := fmt.Sprintf("invalid AS Number format (%s)", s) + return 0, errors.New(msg) + } + + if num1, err := strconv.ParseUint(parts[0], 10, 16); err != nil { + msg := fmt.Sprintf("invalid AS Number format (%s)", s) + return 0, errors.New(msg) + } else if num2, err := strconv.ParseUint(parts[1], 10, 16); err != nil { + msg := fmt.Sprintf("invalid AS Number format (%s)", s) + return 0, errors.New(msg) + } else { + return ASNumber((num1 << 16) + num2), nil + } +} + +// UnmarshalJSON implements the json.Unmarshaller uinterface. +func (a *ASNumber) UnmarshalJSON(b []byte) error { + if err := json.Unmarshal(b, (*uint32)(a)); err == nil { + return nil + } else { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + + if v, err := ASNumberFromString(s); err != nil { + return err + } else { + *a = v + return nil + } + } +} + +// String returns the string value, or the Itoa of the uint value. 
+func (a ASNumber) String() string { + return strconv.FormatUint(uint64(a), 10) +} diff --git a/vendor/github.com/projectcalico/api/pkg/lib/numorstring/doc.go b/vendor/github.com/projectcalico/api/pkg/lib/numorstring/doc.go new file mode 100644 index 000000000..532a7e81f --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/lib/numorstring/doc.go @@ -0,0 +1,22 @@ +// Copyright (c) 2016 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package numorstring implements a set of type definitions that in YAML or JSON +format may be represented by either a number or a string. +*/ + +// +k8s:openapi-gen=true + +package numorstring diff --git a/vendor/github.com/projectcalico/api/pkg/lib/numorstring/port.go b/vendor/github.com/projectcalico/api/pkg/lib/numorstring/port.go new file mode 100644 index 000000000..e07bc2591 --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/lib/numorstring/port.go @@ -0,0 +1,154 @@ +// Copyright (c) 2016-2017 Tigera, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package numorstring + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + "strconv" +) + +// Port represents either a range of numeric ports or a named port. +// +// - For a named port, set the PortName, leaving MinPort and MaxPort as 0. +// - For a port range, set MinPort and MaxPort to the (inclusive) port numbers. Set +// PortName to "". +// - For a single port, set MinPort = MaxPort and PortName = "". +type Port struct { + MinPort uint16 `json:"minPort,omitempty"` + MaxPort uint16 `json:"maxPort,omitempty"` + PortName string `json:"portName" validate:"omitempty,portName"` +} + +// SinglePort creates a Port struct representing a single port. +func SinglePort(port uint16) Port { + return Port{MinPort: port, MaxPort: port} +} + +func NamedPort(name string) Port { + return Port{PortName: name} +} + +// PortFromRange creates a Port struct representing a range of ports. 
+func PortFromRange(minPort, maxPort uint16) (Port, error) { + port := Port{MinPort: minPort, MaxPort: maxPort} + if minPort > maxPort { + msg := fmt.Sprintf("minimum port number (%d) is greater than maximum port number (%d) in port range", minPort, maxPort) + return port, errors.New(msg) + } + return port, nil +} + +var ( + allDigits = regexp.MustCompile(`^\d+$`) + portRange = regexp.MustCompile(`^(\d+):(\d+)$`) + nameRegex = regexp.MustCompile("^[a-zA-Z0-9_.-]{1,128}$") +) + +// PortFromString creates a Port struct from its string representation. A port +// may either be single value "1234", a range of values "100:200" or a named port: "name". +func PortFromString(s string) (Port, error) { + if allDigits.MatchString(s) { + // Port is all digits, it should parse as a single port. + num, err := strconv.ParseUint(s, 10, 16) + if err != nil { + msg := fmt.Sprintf("invalid port format (%s)", s) + return Port{}, errors.New(msg) + } + return SinglePort(uint16(num)), nil + } + + if groups := portRange.FindStringSubmatch(s); len(groups) > 0 { + // Port matches :, it should parse as a range of ports. + if pmin, err := strconv.ParseUint(groups[1], 10, 16); err != nil { + msg := fmt.Sprintf("invalid minimum port number in range (%s)", s) + return Port{}, errors.New(msg) + } else if pmax, err := strconv.ParseUint(groups[2], 10, 16); err != nil { + msg := fmt.Sprintf("invalid maximum port number in range (%s)", s) + return Port{}, errors.New(msg) + } else { + return PortFromRange(uint16(pmin), uint16(pmax)) + } + } + + if !nameRegex.MatchString(s) { + msg := fmt.Sprintf("invalid name for named port (%s)", s) + return Port{}, errors.New(msg) + } + + return NamedPort(s), nil +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (p *Port) UnmarshalJSON(b []byte) error { + if b[0] == '"' { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + + if v, err := PortFromString(s); err != nil { + return err + } else { + *p = v + return nil + } + } + + // It's not a string, it must be a single int. + var i uint16 + if err := json.Unmarshal(b, &i); err != nil { + return err + } + v := SinglePort(i) + *p = v + return nil +} + +// MarshalJSON implements the json.Marshaller interface. +func (p Port) MarshalJSON() ([]byte, error) { + if p.PortName != "" { + return json.Marshal(p.PortName) + } else if p.MinPort == p.MaxPort { + return json.Marshal(p.MinPort) + } else { + return json.Marshal(p.String()) + } +} + +// String returns the string value. If the min and max port are the same +// this returns a single string representation of the port number, otherwise +// if returns a colon separated range of ports. +func (p Port) String() string { + if p.PortName != "" { + return p.PortName + } else if p.MinPort == p.MaxPort { + return strconv.FormatUint(uint64(p.MinPort), 10) + } else { + return fmt.Sprintf("%d:%d", p.MinPort, p.MaxPort) + } +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ Port) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. 
+// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ Port) OpenAPISchemaFormat() string { return "int-or-string" } diff --git a/vendor/github.com/projectcalico/api/pkg/lib/numorstring/protocol.go b/vendor/github.com/projectcalico/api/pkg/lib/numorstring/protocol.go new file mode 100644 index 000000000..1a639f910 --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/lib/numorstring/protocol.go @@ -0,0 +1,145 @@ +// Copyright (c) 2016-2020 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package numorstring + +import "strings" + +const ( + ProtocolUDP = "UDP" + ProtocolTCP = "TCP" + ProtocolICMP = "ICMP" + ProtocolICMPv6 = "ICMPv6" + ProtocolSCTP = "SCTP" + ProtocolUDPLite = "UDPLite" + + ProtocolUDPV1 = "udp" + ProtocolTCPV1 = "tcp" + ProtocolSCTPV1 = "sctp" +) + +var ( + allProtocolNames = []string{ + ProtocolUDP, + ProtocolTCP, + ProtocolICMP, + ProtocolICMPv6, + ProtocolSCTP, + ProtocolUDPLite, + } +) + +type Protocol Uint8OrString + +// ProtocolFromInt creates a Protocol struct from an integer value. +func ProtocolFromInt(p uint8) Protocol { + return Protocol( + Uint8OrString{Type: NumOrStringNum, NumVal: p}, + ) +} + +// ProtocolV3FromProtocolV1 creates a v3 Protocol from a v1 Protocol, +// while handling case conversion. +func ProtocolV3FromProtocolV1(p Protocol) Protocol { + if p.Type == NumOrStringNum { + return p + } + + for _, n := range allProtocolNames { + if strings.EqualFold(n, p.StrVal) { + return Protocol( + Uint8OrString{Type: NumOrStringString, StrVal: n}, + ) + } + } + + return p +} + +// ProtocolFromString creates a Protocol struct from a string value. +func ProtocolFromString(p string) Protocol { + for _, n := range allProtocolNames { + if strings.EqualFold(n, p) { + return Protocol( + Uint8OrString{Type: NumOrStringString, StrVal: n}, + ) + } + } + + // Unknown protocol - return the value unchanged. Validation should catch this. + return Protocol( + Uint8OrString{Type: NumOrStringString, StrVal: p}, + ) +} + +// ProtocolFromStringV1 creates a Protocol struct from a string value (for the v1 API) +func ProtocolFromStringV1(p string) Protocol { + return Protocol( + Uint8OrString{Type: NumOrStringString, StrVal: strings.ToLower(p)}, + ) +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (p *Protocol) UnmarshalJSON(b []byte) error { + return (*Uint8OrString)(p).UnmarshalJSON(b) +} + +// MarshalJSON implements the json.Marshaller interface. +func (p Protocol) MarshalJSON() ([]byte, error) { + return Uint8OrString(p).MarshalJSON() +} + +// String returns the string value, or the Itoa of the int value. +func (p Protocol) String() string { + return (Uint8OrString)(p).String() +} + +// String returns the string value, or the Itoa of the int value. 
+func (p Protocol) ToV1() Protocol { + if p.Type == NumOrStringNum { + return p + } + return ProtocolFromStringV1(p.StrVal) +} + +// NumValue returns the NumVal if type Int, or if +// it is a String, will attempt a conversion to int. +func (p Protocol) NumValue() (uint8, error) { + return (Uint8OrString)(p).NumValue() +} + +// SupportsProtocols returns whether this protocol supports ports. This returns true if +// the numerical or string version of the protocol indicates TCP (6), UDP (17), or SCTP (132). +func (p Protocol) SupportsPorts() bool { + num, err := p.NumValue() + if err == nil { + return num == 6 || num == 17 || num == 132 + } else { + switch p.StrVal { + case ProtocolTCP, ProtocolUDP, ProtocolTCPV1, ProtocolUDPV1, ProtocolSCTP, ProtocolSCTPV1: + return true + } + return false + } +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ Protocol) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ Protocol) OpenAPISchemaFormat() string { return "int-or-string" } diff --git a/vendor/github.com/projectcalico/api/pkg/lib/numorstring/type.go b/vendor/github.com/projectcalico/api/pkg/lib/numorstring/type.go new file mode 100644 index 000000000..ae50cba43 --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/lib/numorstring/type.go @@ -0,0 +1,23 @@ +// Copyright (c) 2016 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package numorstring + +// Type represents the stored type of Int32OrString. +type NumOrStringType int + +const ( + NumOrStringNum NumOrStringType = iota // The structure holds a number. + NumOrStringString // The structure holds a string. +) diff --git a/vendor/github.com/projectcalico/api/pkg/lib/numorstring/uint8orstring.go b/vendor/github.com/projectcalico/api/pkg/lib/numorstring/uint8orstring.go new file mode 100644 index 000000000..16820af42 --- /dev/null +++ b/vendor/github.com/projectcalico/api/pkg/lib/numorstring/uint8orstring.go @@ -0,0 +1,90 @@ +// Copyright (c) 2016 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package numorstring + +import ( + "encoding/json" + "strconv" +) + +// UInt8OrString is a type that can hold an uint8 or a string. When used in +// JSON or YAML marshalling and unmarshalling, it produces or consumes the +// inner type. This allows you to have, for example, a JSON field that can +// accept a name or number. +type Uint8OrString struct { + Type NumOrStringType `json:"type"` + NumVal uint8 `json:"numVal"` + StrVal string `json:"strVal"` +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (i *Uint8OrString) UnmarshalJSON(b []byte) error { + if b[0] == '"' { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + + num, err := strconv.ParseUint(s, 10, 8) + if err == nil { + i.Type = NumOrStringNum + i.NumVal = uint8(num) + } else { + i.Type = NumOrStringString + i.StrVal = s + } + + return nil + } + i.Type = NumOrStringNum + return json.Unmarshal(b, &i.NumVal) +} + +// MarshalJSON implements the json.Marshaller interface. +func (i Uint8OrString) MarshalJSON() ([]byte, error) { + if num, err := i.NumValue(); err == nil { + return json.Marshal(num) + } else { + return json.Marshal(i.StrVal) + } +} + +// String returns the string value, or the Itoa of the int value. +func (i Uint8OrString) String() string { + if i.Type == NumOrStringString { + return i.StrVal + } + return strconv.FormatUint(uint64(i.NumVal), 10) +} + +// NumValue returns the NumVal if type Int, or if +// it is a String, will attempt a conversion to int. +func (i Uint8OrString) NumValue() (uint8, error) { + if i.Type == NumOrStringString { + num, err := strconv.ParseUint(i.StrVal, 10, 8) + return uint8(num), err + } + return i.NumVal, nil +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ Uint8OrString) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ Uint8OrString) OpenAPISchemaFormat() string { return "int-or-string" } diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 179d6e8fc..4921b2d4a 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -17,7 +17,7 @@ import ( "google.golang.org/protobuf/internal/set" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -103,7 +103,7 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { } // unmarshalMessage unmarshals into the given protoreflect.Message. -func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { +func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) error { messageDesc := m.Descriptor() if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { return errors.New("no support for proto1 MessageSets") @@ -150,24 +150,24 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { } // Resolve the field descriptor. 
- var name pref.Name - var fd pref.FieldDescriptor - var xt pref.ExtensionType + var name protoreflect.Name + var fd protoreflect.FieldDescriptor + var xt protoreflect.ExtensionType var xtErr error var isFieldNumberName bool switch tok.NameKind() { case text.IdentName: - name = pref.Name(tok.IdentName()) + name = protoreflect.Name(tok.IdentName()) fd = fieldDescs.ByTextName(string(name)) case text.TypeName: // Handle extensions only. This code path is not for Any. - xt, xtErr = d.opts.Resolver.FindExtensionByName(pref.FullName(tok.TypeName())) + xt, xtErr = d.opts.Resolver.FindExtensionByName(protoreflect.FullName(tok.TypeName())) case text.FieldNumber: isFieldNumberName = true - num := pref.FieldNumber(tok.FieldNumber()) + num := protoreflect.FieldNumber(tok.FieldNumber()) if !num.IsValid() { return d.newError(tok.Pos(), "invalid field number: %d", num) } @@ -215,7 +215,7 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { switch { case fd.IsList(): kind := fd.Kind() - if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() { + if kind != protoreflect.MessageKind && kind != protoreflect.GroupKind && !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") } @@ -232,7 +232,7 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { default: kind := fd.Kind() - if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() { + if kind != protoreflect.MessageKind && kind != protoreflect.GroupKind && !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") } @@ -262,11 +262,11 @@ func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { // unmarshalSingular unmarshals a non-repeated field value specified by the // given FieldDescriptor. -func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error { - var val pref.Value +func (d decoder) unmarshalSingular(fd protoreflect.FieldDescriptor, m protoreflect.Message) error { + var val protoreflect.Value var err error switch fd.Kind() { - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: val = m.NewField(fd) err = d.unmarshalMessage(val.Message(), true) default: @@ -280,94 +280,94 @@ func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) erro // unmarshalScalar unmarshals a scalar/enum protoreflect.Value specified by the // given FieldDescriptor. 
-func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) { +func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { tok, err := d.Read() if err != nil { - return pref.Value{}, err + return protoreflect.Value{}, err } if tok.Kind() != text.Scalar { - return pref.Value{}, d.unexpectedTokenError(tok) + return protoreflect.Value{}, d.unexpectedTokenError(tok) } kind := fd.Kind() switch kind { - case pref.BoolKind: + case protoreflect.BoolKind: if b, ok := tok.Bool(); ok { - return pref.ValueOfBool(b), nil + return protoreflect.ValueOfBool(b), nil } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: if n, ok := tok.Int32(); ok { - return pref.ValueOfInt32(n), nil + return protoreflect.ValueOfInt32(n), nil } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: if n, ok := tok.Int64(); ok { - return pref.ValueOfInt64(n), nil + return protoreflect.ValueOfInt64(n), nil } - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: if n, ok := tok.Uint32(); ok { - return pref.ValueOfUint32(n), nil + return protoreflect.ValueOfUint32(n), nil } - case pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: if n, ok := tok.Uint64(); ok { - return pref.ValueOfUint64(n), nil + return protoreflect.ValueOfUint64(n), nil } - case pref.FloatKind: + case protoreflect.FloatKind: if n, ok := tok.Float32(); ok { - return pref.ValueOfFloat32(n), nil + return protoreflect.ValueOfFloat32(n), nil } - case pref.DoubleKind: + case protoreflect.DoubleKind: if n, ok := tok.Float64(); ok { - return pref.ValueOfFloat64(n), nil + return protoreflect.ValueOfFloat64(n), nil } - case pref.StringKind: + case protoreflect.StringKind: if s, ok := tok.String(); ok { if strs.EnforceUTF8(fd) && !utf8.ValidString(s) { - return pref.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8") + return protoreflect.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8") } - return pref.ValueOfString(s), nil + return protoreflect.ValueOfString(s), nil } - case pref.BytesKind: + case protoreflect.BytesKind: if b, ok := tok.String(); ok { - return pref.ValueOfBytes([]byte(b)), nil + return protoreflect.ValueOfBytes([]byte(b)), nil } - case pref.EnumKind: + case protoreflect.EnumKind: if lit, ok := tok.Enum(); ok { // Lookup EnumNumber based on name. - if enumVal := fd.Enum().Values().ByName(pref.Name(lit)); enumVal != nil { - return pref.ValueOfEnum(enumVal.Number()), nil + if enumVal := fd.Enum().Values().ByName(protoreflect.Name(lit)); enumVal != nil { + return protoreflect.ValueOfEnum(enumVal.Number()), nil } } if num, ok := tok.Int32(); ok { - return pref.ValueOfEnum(pref.EnumNumber(num)), nil + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(num)), nil } default: panic(fmt.Sprintf("invalid scalar kind %v", kind)) } - return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) } // unmarshalList unmarshals into given protoreflect.List. A list value can // either be in [] syntax or simply just a single scalar/message value. 
-func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error { +func (d decoder) unmarshalList(fd protoreflect.FieldDescriptor, list protoreflect.List) error { tok, err := d.Peek() if err != nil { return err } switch fd.Kind() { - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: switch tok.Kind() { case text.ListOpen: d.Read() @@ -441,22 +441,22 @@ func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error { // unmarshalMap unmarshals into given protoreflect.Map. A map value is a // textproto message containing {key: , value: }. -func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error { +func (d decoder) unmarshalMap(fd protoreflect.FieldDescriptor, mmap protoreflect.Map) error { // Determine ahead whether map entry is a scalar type or a message type in // order to call the appropriate unmarshalMapValue func inside // unmarshalMapEntry. - var unmarshalMapValue func() (pref.Value, error) + var unmarshalMapValue func() (protoreflect.Value, error) switch fd.MapValue().Kind() { - case pref.MessageKind, pref.GroupKind: - unmarshalMapValue = func() (pref.Value, error) { + case protoreflect.MessageKind, protoreflect.GroupKind: + unmarshalMapValue = func() (protoreflect.Value, error) { pval := mmap.NewValue() if err := d.unmarshalMessage(pval.Message(), true); err != nil { - return pref.Value{}, err + return protoreflect.Value{}, err } return pval, nil } default: - unmarshalMapValue = func() (pref.Value, error) { + unmarshalMapValue = func() (protoreflect.Value, error) { return d.unmarshalScalar(fd.MapValue()) } } @@ -494,9 +494,9 @@ func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error { // unmarshalMap unmarshals into given protoreflect.Map. A map value is a // textproto message containing {key: , value: }. -func (d decoder) unmarshalMapEntry(fd pref.FieldDescriptor, mmap pref.Map, unmarshalMapValue func() (pref.Value, error)) error { - var key pref.MapKey - var pval pref.Value +func (d decoder) unmarshalMapEntry(fd protoreflect.FieldDescriptor, mmap protoreflect.Map, unmarshalMapValue func() (protoreflect.Value, error)) error { + var key protoreflect.MapKey + var pval protoreflect.Value Loop: for { // Read field name. @@ -520,7 +520,7 @@ Loop: return d.unexpectedTokenError(tok) } - switch name := pref.Name(tok.IdentName()); name { + switch name := protoreflect.Name(tok.IdentName()); name { case genid.MapEntry_Key_field_name: if !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") @@ -535,7 +535,7 @@ Loop: key = val.MapKey() case genid.MapEntry_Value_field_name: - if kind := fd.MapValue().Kind(); (kind != pref.MessageKind) && (kind != pref.GroupKind) { + if kind := fd.MapValue().Kind(); (kind != protoreflect.MessageKind) && (kind != protoreflect.GroupKind) { if !tok.HasSeparator() { return d.syntaxError(tok.Pos(), "missing field separator :") } @@ -561,7 +561,7 @@ Loop: } if !pval.IsValid() { switch fd.MapValue().Kind() { - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: // If value field is not set for message/group types, construct an // empty one as default. pval = mmap.NewValue() @@ -575,7 +575,7 @@ Loop: // unmarshalAny unmarshals an Any textproto. It can either be in expanded form // or non-expanded form. 
-func (d decoder) unmarshalAny(m pref.Message, checkDelims bool) error { +func (d decoder) unmarshalAny(m protoreflect.Message, checkDelims bool) error { var typeURL string var bValue []byte var seenTypeUrl bool @@ -619,7 +619,7 @@ Loop: return d.syntaxError(tok.Pos(), "missing field separator :") } - switch name := pref.Name(tok.IdentName()); name { + switch name := protoreflect.Name(tok.IdentName()); name { case genid.Any_TypeUrl_field_name: if seenTypeUrl { return d.newError(tok.Pos(), "duplicate %v field", genid.Any_TypeUrl_field_fullname) @@ -686,10 +686,10 @@ Loop: fds := m.Descriptor().Fields() if len(typeURL) > 0 { - m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), pref.ValueOfString(typeURL)) + m.Set(fds.ByNumber(genid.Any_TypeUrl_field_number), protoreflect.ValueOfString(typeURL)) } if len(bValue) > 0 { - m.Set(fds.ByNumber(genid.Any_Value_field_number), pref.ValueOfBytes(bValue)) + m.Set(fds.ByNumber(genid.Any_Value_field_number), protoreflect.ValueOfBytes(bValue)) } return nil } diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index 8d5304dc5..ebf6c6528 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -20,7 +20,6 @@ import ( "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -150,7 +149,7 @@ type encoder struct { } // marshalMessage marshals the given protoreflect.Message. -func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { +func (e encoder) marshalMessage(m protoreflect.Message, inclDelims bool) error { messageDesc := m.Descriptor() if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { return errors.New("no support for proto1 MessageSets") @@ -190,7 +189,7 @@ func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { } // marshalField marshals the given field with protoreflect.Value. -func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescriptor) error { +func (e encoder) marshalField(name string, val protoreflect.Value, fd protoreflect.FieldDescriptor) error { switch { case fd.IsList(): return e.marshalList(name, val.List(), fd) @@ -204,40 +203,40 @@ func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescript // marshalSingular marshals the given non-repeated field value. This includes // all scalar types, enums, messages, and groups. 
-func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error { +func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { kind := fd.Kind() switch kind { - case pref.BoolKind: + case protoreflect.BoolKind: e.WriteBool(val.Bool()) - case pref.StringKind: + case protoreflect.StringKind: s := val.String() if !e.opts.allowInvalidUTF8 && strs.EnforceUTF8(fd) && !utf8.ValidString(s) { return errors.InvalidUTF8(string(fd.FullName())) } e.WriteString(s) - case pref.Int32Kind, pref.Int64Kind, - pref.Sint32Kind, pref.Sint64Kind, - pref.Sfixed32Kind, pref.Sfixed64Kind: + case protoreflect.Int32Kind, protoreflect.Int64Kind, + protoreflect.Sint32Kind, protoreflect.Sint64Kind, + protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind: e.WriteInt(val.Int()) - case pref.Uint32Kind, pref.Uint64Kind, - pref.Fixed32Kind, pref.Fixed64Kind: + case protoreflect.Uint32Kind, protoreflect.Uint64Kind, + protoreflect.Fixed32Kind, protoreflect.Fixed64Kind: e.WriteUint(val.Uint()) - case pref.FloatKind: + case protoreflect.FloatKind: // Encoder.WriteFloat handles the special numbers NaN and infinites. e.WriteFloat(val.Float(), 32) - case pref.DoubleKind: + case protoreflect.DoubleKind: // Encoder.WriteFloat handles the special numbers NaN and infinites. e.WriteFloat(val.Float(), 64) - case pref.BytesKind: + case protoreflect.BytesKind: e.WriteString(string(val.Bytes())) - case pref.EnumKind: + case protoreflect.EnumKind: num := val.Enum() if desc := fd.Enum().Values().ByNumber(num); desc != nil { e.WriteLiteral(string(desc.Name())) @@ -246,7 +245,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error e.WriteInt(int64(num)) } - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: return e.marshalMessage(val.Message(), true) default: @@ -256,7 +255,7 @@ func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error } // marshalList marshals the given protoreflect.List as multiple name-value fields. -func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescriptor) error { +func (e encoder) marshalList(name string, list protoreflect.List, fd protoreflect.FieldDescriptor) error { size := list.Len() for i := 0; i < size; i++ { e.WriteName(name) @@ -268,9 +267,9 @@ func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescripto } // marshalMap marshals the given protoreflect.Map as multiple name-value fields. -func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error { +func (e encoder) marshalMap(name string, mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { var err error - order.RangeEntries(mmap, order.GenericKeyOrder, func(key pref.MapKey, val pref.Value) bool { + order.RangeEntries(mmap, order.GenericKeyOrder, func(key protoreflect.MapKey, val protoreflect.Value) bool { e.WriteName(name) e.StartMessage() defer e.EndMessage() @@ -334,7 +333,7 @@ func (e encoder) marshalUnknown(b []byte) { // marshalAny marshals the given google.protobuf.Any message in expanded form. // It returns true if it was able to marshal, else false. -func (e encoder) marshalAny(any pref.Message) bool { +func (e encoder) marshalAny(any protoreflect.Message) bool { // Construct the embedded message. 
fds := any.Descriptor().Fields() fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index a427f8b70..ce57f57eb 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -21,10 +21,11 @@ import ( type Number int32 const ( - MinValidNumber Number = 1 - FirstReservedNumber Number = 19000 - LastReservedNumber Number = 19999 - MaxValidNumber Number = 1<<29 - 1 + MinValidNumber Number = 1 + FirstReservedNumber Number = 19000 + LastReservedNumber Number = 19999 + MaxValidNumber Number = 1<<29 - 1 + DefaultRecursionLimit = 10000 ) // IsValid reports whether the field number is semantically valid. @@ -55,6 +56,7 @@ const ( errCodeOverflow errCodeReserved errCodeEndGroup + errCodeRecursionDepth ) var ( @@ -112,6 +114,10 @@ func ConsumeField(b []byte) (Number, Type, int) { // When parsing a group, the length includes the end group marker and // the end group is verified to match the starting field number. func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { + return consumeFieldValueD(num, typ, b, DefaultRecursionLimit) +} + +func consumeFieldValueD(num Number, typ Type, b []byte, depth int) (n int) { switch typ { case VarintType: _, n = ConsumeVarint(b) @@ -126,6 +132,9 @@ func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { _, n = ConsumeBytes(b) return n case StartGroupType: + if depth < 0 { + return errCodeRecursionDepth + } n0 := len(b) for { num2, typ2, n := ConsumeTag(b) @@ -140,7 +149,7 @@ func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { return n0 - len(b) } - n = ConsumeFieldValue(num2, typ2, b) + n = consumeFieldValueD(num2, typ2, b, depth-1) if n < 0 { return n // forward error code } @@ -507,6 +516,7 @@ func EncodeTag(num Number, typ Type) uint64 { } // DecodeZigZag decodes a zig-zag-encoded uint64 as an int64. +// // Input: {…, 5, 3, 1, 0, 2, 4, 6, …} // Output: {…, -3, -2, -1, 0, +1, +2, +3, …} func DecodeZigZag(x uint64) int64 { @@ -514,6 +524,7 @@ func DecodeZigZag(x uint64) int64 { } // EncodeZigZag encodes an int64 as a zig-zag-encoded uint64. +// // Input: {…, -3, -2, -1, 0, +1, +2, +3, …} // Output: {…, 5, 3, 1, 0, 2, 4, 6, …} func EncodeZigZag(x int64) uint64 { @@ -521,6 +532,7 @@ func EncodeZigZag(x int64) uint64 { } // DecodeBool decodes a uint64 as a bool. +// // Input: { 0, 1, 2, …} // Output: {false, true, true, …} func DecodeBool(x uint64) bool { @@ -528,6 +540,7 @@ func DecodeBool(x uint64) bool { } // EncodeBool encodes a bool as a uint64. 
+// // Input: {false, true} // Output: { 0, 1} func EncodeBool(x bool) uint64 { diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go index 360c63329..db5248e1b 100644 --- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -14,7 +14,7 @@ import ( "google.golang.org/protobuf/internal/detrand" "google.golang.org/protobuf/internal/pragma" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type list interface { @@ -30,17 +30,17 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { if isRoot { var name string switch vs.(type) { - case pref.Names: + case protoreflect.Names: name = "Names" - case pref.FieldNumbers: + case protoreflect.FieldNumbers: name = "FieldNumbers" - case pref.FieldRanges: + case protoreflect.FieldRanges: name = "FieldRanges" - case pref.EnumRanges: + case protoreflect.EnumRanges: name = "EnumRanges" - case pref.FileImports: + case protoreflect.FileImports: name = "FileImports" - case pref.Descriptor: + case protoreflect.Descriptor: name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s" default: name = reflect.ValueOf(vs).Elem().Type().Name() @@ -50,17 +50,17 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { var ss []string switch vs := vs.(type) { - case pref.Names: + case protoreflect.Names: for i := 0; i < vs.Len(); i++ { ss = append(ss, fmt.Sprint(vs.Get(i))) } return start + joinStrings(ss, false) + end - case pref.FieldNumbers: + case protoreflect.FieldNumbers: for i := 0; i < vs.Len(); i++ { ss = append(ss, fmt.Sprint(vs.Get(i))) } return start + joinStrings(ss, false) + end - case pref.FieldRanges: + case protoreflect.FieldRanges: for i := 0; i < vs.Len(); i++ { r := vs.Get(i) if r[0]+1 == r[1] { @@ -70,7 +70,7 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { } } return start + joinStrings(ss, false) + end - case pref.EnumRanges: + case protoreflect.EnumRanges: for i := 0; i < vs.Len(); i++ { r := vs.Get(i) if r[0] == r[1] { @@ -80,7 +80,7 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { } } return start + joinStrings(ss, false) + end - case pref.FileImports: + case protoreflect.FileImports: for i := 0; i < vs.Len(); i++ { var rs records rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak") @@ -88,11 +88,11 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { } return start + joinStrings(ss, allowMulti) + end default: - _, isEnumValue := vs.(pref.EnumValueDescriptors) + _, isEnumValue := vs.(protoreflect.EnumValueDescriptors) for i := 0; i < vs.Len(); i++ { m := reflect.ValueOf(vs).MethodByName("Get") v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface() - ss = append(ss, formatDescOpt(v.(pref.Descriptor), false, allowMulti && !isEnumValue)) + ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue)) } return start + joinStrings(ss, allowMulti && isEnumValue) + end } @@ -106,20 +106,20 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string { // // Using a list allows us to print the accessors in a sensible order. 
var descriptorAccessors = map[reflect.Type][]string{ - reflect.TypeOf((*pref.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, - reflect.TypeOf((*pref.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, - reflect.TypeOf((*pref.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, - reflect.TypeOf((*pref.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt - reflect.TypeOf((*pref.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, - reflect.TypeOf((*pref.EnumValueDescriptor)(nil)).Elem(): {"Number"}, - reflect.TypeOf((*pref.ServiceDescriptor)(nil)).Elem(): {"Methods"}, - reflect.TypeOf((*pref.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, + reflect.TypeOf((*protoreflect.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, + reflect.TypeOf((*protoreflect.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, + reflect.TypeOf((*protoreflect.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, + reflect.TypeOf((*protoreflect.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt + reflect.TypeOf((*protoreflect.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, + reflect.TypeOf((*protoreflect.EnumValueDescriptor)(nil)).Elem(): {"Number"}, + reflect.TypeOf((*protoreflect.ServiceDescriptor)(nil)).Elem(): {"Methods"}, + reflect.TypeOf((*protoreflect.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, } -func FormatDesc(s fmt.State, r rune, t pref.Descriptor) { +func FormatDesc(s fmt.State, r rune, t protoreflect.Descriptor) { io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) } -func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { +func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string { rv := reflect.ValueOf(t) rt := rv.MethodByName("ProtoType").Type().In(0) @@ -128,7 +128,7 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { start = rt.Name() + "{" } - _, isFile := t.(pref.FileDescriptor) + _, isFile := t.(protoreflect.FileDescriptor) rs := records{allowMulti: allowMulti} if t.IsPlaceholder() { if isFile { @@ -146,7 +146,7 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { rs.Append(rv, "Name") } switch t := t.(type) { - case pref.FieldDescriptor: + case protoreflect.FieldDescriptor: for _, s := range descriptorAccessors[rt] { switch s { case "MapKey": @@ -156,9 +156,9 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { case "MapValue": if v := t.MapValue(); v != nil { switch v.Kind() { - case pref.EnumKind: + case protoreflect.EnumKind: 
rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())}) - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())}) default: rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()}) @@ -180,7 +180,7 @@ func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { rs.Append(rv, s) } } - case pref.OneofDescriptor: + case protoreflect.OneofDescriptor: var ss []string fs := t.Fields() for i := 0; i < fs.Len(); i++ { @@ -216,7 +216,7 @@ func (rs *records) Append(v reflect.Value, accessors ...string) { if !rv.IsValid() { panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a)) } - if _, ok := rv.Interface().(pref.Value); ok { + if _, ok := rv.Interface().(protoreflect.Value); ok { rv = rv.MethodByName("Interface").Call(nil)[0] if !rv.IsNil() { rv = rv.Elem() @@ -250,9 +250,9 @@ func (rs *records) Append(v reflect.Value, accessors ...string) { switch v := v.(type) { case list: s = formatListOpt(v, false, rs.allowMulti) - case pref.FieldDescriptor, pref.OneofDescriptor, pref.EnumValueDescriptor, pref.MethodDescriptor: - s = string(v.(pref.Descriptor).Name()) - case pref.Descriptor: + case protoreflect.FieldDescriptor, protoreflect.OneofDescriptor, protoreflect.EnumValueDescriptor, protoreflect.MethodDescriptor: + s = string(v.(protoreflect.Descriptor).Name()) + case protoreflect.Descriptor: s = string(v.FullName()) case string: s = strconv.Quote(v) diff --git a/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go index fdd9b13f2..328dc733b 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go @@ -15,8 +15,8 @@ import ( "strconv" ptext "google.golang.org/protobuf/internal/encoding/text" - errors "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" ) // Format is the serialization format used to represent the default value. @@ -35,56 +35,56 @@ const ( // Unmarshal deserializes the default string s according to the given kind k. // When k is an enum, a list of enum value descriptors must be provided. -func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) (pref.Value, pref.EnumValueDescriptor, error) { +func Unmarshal(s string, k protoreflect.Kind, evs protoreflect.EnumValueDescriptors, f Format) (protoreflect.Value, protoreflect.EnumValueDescriptor, error) { switch k { - case pref.BoolKind: + case protoreflect.BoolKind: if f == GoTag { switch s { case "1": - return pref.ValueOfBool(true), nil, nil + return protoreflect.ValueOfBool(true), nil, nil case "0": - return pref.ValueOfBool(false), nil, nil + return protoreflect.ValueOfBool(false), nil, nil } } else { switch s { case "true": - return pref.ValueOfBool(true), nil, nil + return protoreflect.ValueOfBool(true), nil, nil case "false": - return pref.ValueOfBool(false), nil, nil + return protoreflect.ValueOfBool(false), nil, nil } } - case pref.EnumKind: + case protoreflect.EnumKind: if f == GoTag { // Go tags use the numeric form of the enum value. 
if n, err := strconv.ParseInt(s, 10, 32); err == nil { - if ev := evs.ByNumber(pref.EnumNumber(n)); ev != nil { - return pref.ValueOfEnum(ev.Number()), ev, nil + if ev := evs.ByNumber(protoreflect.EnumNumber(n)); ev != nil { + return protoreflect.ValueOfEnum(ev.Number()), ev, nil } } } else { // Descriptor default_value use the enum identifier. - ev := evs.ByName(pref.Name(s)) + ev := evs.ByName(protoreflect.Name(s)) if ev != nil { - return pref.ValueOfEnum(ev.Number()), ev, nil + return protoreflect.ValueOfEnum(ev.Number()), ev, nil } } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: if v, err := strconv.ParseInt(s, 10, 32); err == nil { - return pref.ValueOfInt32(int32(v)), nil, nil + return protoreflect.ValueOfInt32(int32(v)), nil, nil } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: if v, err := strconv.ParseInt(s, 10, 64); err == nil { - return pref.ValueOfInt64(int64(v)), nil, nil + return protoreflect.ValueOfInt64(int64(v)), nil, nil } - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: if v, err := strconv.ParseUint(s, 10, 32); err == nil { - return pref.ValueOfUint32(uint32(v)), nil, nil + return protoreflect.ValueOfUint32(uint32(v)), nil, nil } - case pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: if v, err := strconv.ParseUint(s, 10, 64); err == nil { - return pref.ValueOfUint64(uint64(v)), nil, nil + return protoreflect.ValueOfUint64(uint64(v)), nil, nil } - case pref.FloatKind, pref.DoubleKind: + case protoreflect.FloatKind, protoreflect.DoubleKind: var v float64 var err error switch s { @@ -98,29 +98,29 @@ func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) ( v, err = strconv.ParseFloat(s, 64) } if err == nil { - if k == pref.FloatKind { - return pref.ValueOfFloat32(float32(v)), nil, nil + if k == protoreflect.FloatKind { + return protoreflect.ValueOfFloat32(float32(v)), nil, nil } else { - return pref.ValueOfFloat64(float64(v)), nil, nil + return protoreflect.ValueOfFloat64(float64(v)), nil, nil } } - case pref.StringKind: + case protoreflect.StringKind: // String values are already unescaped and can be used as is. - return pref.ValueOfString(s), nil, nil - case pref.BytesKind: + return protoreflect.ValueOfString(s), nil, nil + case protoreflect.BytesKind: if b, ok := unmarshalBytes(s); ok { - return pref.ValueOfBytes(b), nil, nil + return protoreflect.ValueOfBytes(b), nil, nil } } - return pref.Value{}, nil, errors.New("could not parse value for %v: %q", k, s) + return protoreflect.Value{}, nil, errors.New("could not parse value for %v: %q", k, s) } // Marshal serializes v as the default string according to the given kind k. // When specifying the Descriptor format for an enum kind, the associated // enum value descriptor must be provided. 
-func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) (string, error) { +func Marshal(v protoreflect.Value, ev protoreflect.EnumValueDescriptor, k protoreflect.Kind, f Format) (string, error) { switch k { - case pref.BoolKind: + case protoreflect.BoolKind: if f == GoTag { if v.Bool() { return "1", nil @@ -134,17 +134,17 @@ func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) ( return "false", nil } } - case pref.EnumKind: + case protoreflect.EnumKind: if f == GoTag { return strconv.FormatInt(int64(v.Enum()), 10), nil } else { return string(ev.Name()), nil } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind, pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: return strconv.FormatInt(v.Int(), 10), nil - case pref.Uint32Kind, pref.Fixed32Kind, pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: return strconv.FormatUint(v.Uint(), 10), nil - case pref.FloatKind, pref.DoubleKind: + case protoreflect.FloatKind, protoreflect.DoubleKind: f := v.Float() switch { case math.IsInf(f, -1): @@ -154,16 +154,16 @@ func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) ( case math.IsNaN(f): return "nan", nil default: - if k == pref.FloatKind { + if k == protoreflect.FloatKind { return strconv.FormatFloat(f, 'g', -1, 32), nil } else { return strconv.FormatFloat(f, 'g', -1, 64), nil } } - case pref.StringKind: + case protoreflect.StringKind: // String values are serialized as is without any escaping. return v.String(), nil - case pref.BytesKind: + case protoreflect.BytesKind: if s, ok := marshalBytes(v.Bytes()); ok { return s, nil } diff --git a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go index c1866f3c1..a6693f0a2 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // The MessageSet wire format is equivalent to a message defined as follows, @@ -33,6 +33,7 @@ const ( // ExtensionName is the field name for extensions of MessageSet. // // A valid MessageSet extension must be of the form: +// // message MyMessage { // extend proto2.bridge.MessageSet { // optional MyMessage message_set_extension = 1234; @@ -42,13 +43,13 @@ const ( const ExtensionName = "message_set_extension" // IsMessageSet returns whether the message uses the MessageSet wire format. -func IsMessageSet(md pref.MessageDescriptor) bool { +func IsMessageSet(md protoreflect.MessageDescriptor) bool { xmd, ok := md.(interface{ IsMessageSet() bool }) return ok && xmd.IsMessageSet() } // IsMessageSetExtension reports this field properly extends a MessageSet. 
-func IsMessageSetExtension(fd pref.FieldDescriptor) bool { +func IsMessageSetExtension(fd protoreflect.FieldDescriptor) bool { switch { case fd.Name() != ExtensionName: return false diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 38f1931c6..373d20837 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -11,10 +11,10 @@ import ( "strconv" "strings" - defval "google.golang.org/protobuf/internal/encoding/defval" - fdesc "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) var byteType = reflect.TypeOf(byte(0)) @@ -29,9 +29,9 @@ var byteType = reflect.TypeOf(byte(0)) // This does not populate the Enum or Message (except for weak message). // // This function is a best effort attempt; parsing errors are ignored. -func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) pref.FieldDescriptor { - f := new(fdesc.Field) - f.L0.ParentFile = fdesc.SurrogateProto2 +func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor { + f := new(filedesc.Field) + f.L0.ParentFile = filedesc.SurrogateProto2 for len(tag) > 0 { i := strings.IndexByte(tag, ',') if i < 0 { @@ -39,68 +39,68 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p } switch s := tag[:i]; { case strings.HasPrefix(s, "name="): - f.L0.FullName = pref.FullName(s[len("name="):]) + f.L0.FullName = protoreflect.FullName(s[len("name="):]) case strings.Trim(s, "0123456789") == "": n, _ := strconv.ParseUint(s, 10, 32) - f.L1.Number = pref.FieldNumber(n) + f.L1.Number = protoreflect.FieldNumber(n) case s == "opt": - f.L1.Cardinality = pref.Optional + f.L1.Cardinality = protoreflect.Optional case s == "req": - f.L1.Cardinality = pref.Required + f.L1.Cardinality = protoreflect.Required case s == "rep": - f.L1.Cardinality = pref.Repeated + f.L1.Cardinality = protoreflect.Repeated case s == "varint": switch goType.Kind() { case reflect.Bool: - f.L1.Kind = pref.BoolKind + f.L1.Kind = protoreflect.BoolKind case reflect.Int32: - f.L1.Kind = pref.Int32Kind + f.L1.Kind = protoreflect.Int32Kind case reflect.Int64: - f.L1.Kind = pref.Int64Kind + f.L1.Kind = protoreflect.Int64Kind case reflect.Uint32: - f.L1.Kind = pref.Uint32Kind + f.L1.Kind = protoreflect.Uint32Kind case reflect.Uint64: - f.L1.Kind = pref.Uint64Kind + f.L1.Kind = protoreflect.Uint64Kind } case s == "zigzag32": if goType.Kind() == reflect.Int32 { - f.L1.Kind = pref.Sint32Kind + f.L1.Kind = protoreflect.Sint32Kind } case s == "zigzag64": if goType.Kind() == reflect.Int64 { - f.L1.Kind = pref.Sint64Kind + f.L1.Kind = protoreflect.Sint64Kind } case s == "fixed32": switch goType.Kind() { case reflect.Int32: - f.L1.Kind = pref.Sfixed32Kind + f.L1.Kind = protoreflect.Sfixed32Kind case reflect.Uint32: - f.L1.Kind = pref.Fixed32Kind + f.L1.Kind = protoreflect.Fixed32Kind case reflect.Float32: - f.L1.Kind = pref.FloatKind + f.L1.Kind = protoreflect.FloatKind } case s == "fixed64": switch goType.Kind() { case reflect.Int64: - f.L1.Kind = pref.Sfixed64Kind + f.L1.Kind = protoreflect.Sfixed64Kind case reflect.Uint64: - f.L1.Kind = pref.Fixed64Kind + f.L1.Kind = 
protoreflect.Fixed64Kind case reflect.Float64: - f.L1.Kind = pref.DoubleKind + f.L1.Kind = protoreflect.DoubleKind } case s == "bytes": switch { case goType.Kind() == reflect.String: - f.L1.Kind = pref.StringKind + f.L1.Kind = protoreflect.StringKind case goType.Kind() == reflect.Slice && goType.Elem() == byteType: - f.L1.Kind = pref.BytesKind + f.L1.Kind = protoreflect.BytesKind default: - f.L1.Kind = pref.MessageKind + f.L1.Kind = protoreflect.MessageKind } case s == "group": - f.L1.Kind = pref.GroupKind + f.L1.Kind = protoreflect.GroupKind case strings.HasPrefix(s, "enum="): - f.L1.Kind = pref.EnumKind + f.L1.Kind = protoreflect.EnumKind case strings.HasPrefix(s, "json="): jsonName := s[len("json="):] if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) { @@ -111,23 +111,23 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p f.L1.IsPacked = true case strings.HasPrefix(s, "weak="): f.L1.IsWeak = true - f.L1.Message = fdesc.PlaceholderMessage(pref.FullName(s[len("weak="):])) + f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):])) case strings.HasPrefix(s, "def="): // The default tag is special in that everything afterwards is the // default regardless of the presence of commas. s, i = tag[len("def="):], len(tag) v, ev, _ := defval.Unmarshal(s, f.L1.Kind, evs, defval.GoTag) - f.L1.Default = fdesc.DefaultValue(v, ev) + f.L1.Default = filedesc.DefaultValue(v, ev) case s == "proto3": - f.L0.ParentFile = fdesc.SurrogateProto3 + f.L0.ParentFile = filedesc.SurrogateProto3 } tag = strings.TrimPrefix(tag[i:], ",") } // The generator uses the group message name instead of the field name. // We obtain the real field name by lowercasing the group name. - if f.L1.Kind == pref.GroupKind { - f.L0.FullName = pref.FullName(strings.ToLower(string(f.L0.FullName))) + if f.L1.Kind == protoreflect.GroupKind { + f.L0.FullName = protoreflect.FullName(strings.ToLower(string(f.L0.FullName))) } return f } @@ -140,38 +140,38 @@ func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) p // Depending on the context on how Marshal is called, there are different ways // through which that information is determined. As such it is the caller's // responsibility to provide a function to obtain that information. 
-func Marshal(fd pref.FieldDescriptor, enumName string) string { +func Marshal(fd protoreflect.FieldDescriptor, enumName string) string { var tag []string switch fd.Kind() { - case pref.BoolKind, pref.EnumKind, pref.Int32Kind, pref.Uint32Kind, pref.Int64Kind, pref.Uint64Kind: + case protoreflect.BoolKind, protoreflect.EnumKind, protoreflect.Int32Kind, protoreflect.Uint32Kind, protoreflect.Int64Kind, protoreflect.Uint64Kind: tag = append(tag, "varint") - case pref.Sint32Kind: + case protoreflect.Sint32Kind: tag = append(tag, "zigzag32") - case pref.Sint64Kind: + case protoreflect.Sint64Kind: tag = append(tag, "zigzag64") - case pref.Sfixed32Kind, pref.Fixed32Kind, pref.FloatKind: + case protoreflect.Sfixed32Kind, protoreflect.Fixed32Kind, protoreflect.FloatKind: tag = append(tag, "fixed32") - case pref.Sfixed64Kind, pref.Fixed64Kind, pref.DoubleKind: + case protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind, protoreflect.DoubleKind: tag = append(tag, "fixed64") - case pref.StringKind, pref.BytesKind, pref.MessageKind: + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind: tag = append(tag, "bytes") - case pref.GroupKind: + case protoreflect.GroupKind: tag = append(tag, "group") } tag = append(tag, strconv.Itoa(int(fd.Number()))) switch fd.Cardinality() { - case pref.Optional: + case protoreflect.Optional: tag = append(tag, "opt") - case pref.Required: + case protoreflect.Required: tag = append(tag, "req") - case pref.Repeated: + case protoreflect.Repeated: tag = append(tag, "rep") } if fd.IsPacked() { tag = append(tag, "packed") } name := string(fd.Name()) - if fd.Kind() == pref.GroupKind { + if fd.Kind() == protoreflect.GroupKind { // The name of the FieldDescriptor for a group field is // lowercased. To find the original capitalization, we // look in the field's MessageType. @@ -189,10 +189,10 @@ func Marshal(fd pref.FieldDescriptor, enumName string) string { // The previous implementation does not tag extension fields as proto3, // even when the field is defined in a proto3 file. Match that behavior // for consistency. - if fd.Syntax() == pref.Proto3 && !fd.IsExtension() { + if fd.Syntax() == protoreflect.Proto3 && !fd.IsExtension() { tag = append(tag, "proto3") } - if fd.Kind() == pref.EnumKind && enumName != "" { + if fd.Kind() == protoreflect.EnumKind && enumName != "" { tag = append(tag, "enum="+enumName) } if fd.ContainingOneof() != nil { diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index eb10ea102..427c62d03 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -8,7 +8,6 @@ import ( "bytes" "fmt" "io" - "regexp" "strconv" "unicode/utf8" @@ -381,7 +380,7 @@ func (d *Decoder) currentOpenKind() (Kind, byte) { case '[': return ListOpen, ']' } - panic(fmt.Sprintf("Decoder: openStack contains invalid byte %s", string(openCh))) + panic(fmt.Sprintf("Decoder: openStack contains invalid byte %c", openCh)) } func (d *Decoder) pushOpenStack(ch byte) { @@ -421,7 +420,7 @@ func (d *Decoder) parseFieldName() (tok Token, err error) { return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size]) } - return Token{}, d.newSyntaxError("invalid field name: %s", errRegexp.Find(d.in)) + return Token{}, d.newSyntaxError("invalid field name: %s", errId(d.in)) } // parseTypeName parses Any type URL or extension field name. 
The name is @@ -571,7 +570,7 @@ func (d *Decoder) parseScalar() (Token, error) { return tok, nil } - return Token{}, d.newSyntaxError("invalid scalar value: %s", errRegexp.Find(d.in)) + return Token{}, d.newSyntaxError("invalid scalar value: %s", errId(d.in)) } // parseLiteralValue parses a literal value. A literal value is used for @@ -653,8 +652,29 @@ func consume(b []byte, n int) []byte { return b } -// Any sequence that looks like a non-delimiter (for error reporting). -var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9\/]+|.)`) +// errId extracts a byte sequence that looks like an invalid ID +// (for the purposes of error reporting). +func errId(seq []byte) []byte { + const maxLen = 32 + for i := 0; i < len(seq); { + if i > maxLen { + return append(seq[:i:i], "…"...) + } + r, size := utf8.DecodeRune(seq[i:]) + if r > utf8.RuneSelf || (r != '/' && isDelim(byte(r))) { + if i == 0 { + // Either the first byte is invalid UTF-8 or a + // delimiter, or the first rune is non-ASCII. + // Return it as-is. + i = size + } + return seq[:i:i] + } + i += size + } + // No delimiter found. + return seq +} // isDelim returns true if given byte is a delimiter character. func isDelim(c byte) bool { diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go index f2d90b789..81a5d8c86 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go @@ -50,8 +50,10 @@ type number struct { // parseNumber constructs a number object from given input. It allows for the // following patterns: -// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*) -// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?) +// +// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*) +// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?) +// // It also returns the number of parsed bytes for the given number, 0 if it is // not a number. func parseNumber(input []byte) number { diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go index 0ce8d6fb8..7ae6c2a3c 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go @@ -24,6 +24,6 @@ // the Go implementation should as well. // // The text format is almost a superset of JSON except: -// * message keys are not quoted strings, but identifiers -// * the top-level value must be a message without the delimiters +// - message keys are not quoted strings, but identifiers +// - the top-level value must be a message without the delimiters package text diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go index f90e909b3..fbcd34920 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build !go1.13 // +build !go1.13 package errors diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go index dc05f4191..5e72f1cde 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.13 // +build go1.13 package errors diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go index b293b6947..7cac1c190 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/build.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go @@ -12,8 +12,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/reflect/protoregistry" ) // Builder construct a protoreflect.FileDescriptor from the raw descriptor. @@ -38,7 +37,7 @@ type Builder struct { // TypeResolver resolves extension field types for descriptor options. // If nil, it uses protoregistry.GlobalTypes. TypeResolver interface { - preg.ExtensionTypeResolver + protoregistry.ExtensionTypeResolver } // FileRegistry is use to lookup file, enum, and message dependencies. @@ -46,8 +45,8 @@ type Builder struct { // If nil, it uses protoregistry.GlobalFiles. FileRegistry interface { FindFileByPath(string) (protoreflect.FileDescriptor, error) - FindDescriptorByName(pref.FullName) (pref.Descriptor, error) - RegisterFile(pref.FileDescriptor) error + FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) + RegisterFile(protoreflect.FileDescriptor) error } } @@ -55,8 +54,8 @@ type Builder struct { // If so, it permits looking up an enum or message dependency based on the // sub-list and element index into filetype.Builder.DependencyIndexes. type resolverByIndex interface { - FindEnumByIndex(int32, int32, []Enum, []Message) pref.EnumDescriptor - FindMessageByIndex(int32, int32, []Enum, []Message) pref.MessageDescriptor + FindEnumByIndex(int32, int32, []Enum, []Message) protoreflect.EnumDescriptor + FindMessageByIndex(int32, int32, []Enum, []Message) protoreflect.MessageDescriptor } // Indexes of each sub-list in filetype.Builder.DependencyIndexes. @@ -70,7 +69,7 @@ const ( // Out is the output of the Builder. type Out struct { - File pref.FileDescriptor + File protoreflect.FileDescriptor // Enums is all enum descriptors in "flattened ordering". Enums []Enum @@ -97,10 +96,10 @@ func (db Builder) Build() (out Out) { // Initialize resolvers and registries if unpopulated. 
if db.TypeResolver == nil { - db.TypeResolver = preg.GlobalTypes + db.TypeResolver = protoregistry.GlobalTypes } if db.FileRegistry == nil { - db.FileRegistry = preg.GlobalFiles + db.FileRegistry = protoregistry.GlobalFiles } fd := newRawFile(db) diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 98ab142ae..7c3689bae 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -17,7 +17,7 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -43,9 +43,9 @@ type ( L2 *FileL2 } FileL1 struct { - Syntax pref.Syntax + Syntax protoreflect.Syntax Path string - Package pref.FullName + Package protoreflect.FullName Enums Enums Messages Messages @@ -53,36 +53,36 @@ type ( Services Services } FileL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Imports FileImports Locations SourceLocations } ) -func (fd *File) ParentFile() pref.FileDescriptor { return fd } -func (fd *File) Parent() pref.Descriptor { return nil } -func (fd *File) Index() int { return 0 } -func (fd *File) Syntax() pref.Syntax { return fd.L1.Syntax } -func (fd *File) Name() pref.Name { return fd.L1.Package.Name() } -func (fd *File) FullName() pref.FullName { return fd.L1.Package } -func (fd *File) IsPlaceholder() bool { return false } -func (fd *File) Options() pref.ProtoMessage { +func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd } +func (fd *File) Parent() protoreflect.Descriptor { return nil } +func (fd *File) Index() int { return 0 } +func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax } +func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() } +func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package } +func (fd *File) IsPlaceholder() bool { return false } +func (fd *File) Options() protoreflect.ProtoMessage { if f := fd.lazyInit().Options; f != nil { return f() } return descopts.File } -func (fd *File) Path() string { return fd.L1.Path } -func (fd *File) Package() pref.FullName { return fd.L1.Package } -func (fd *File) Imports() pref.FileImports { return &fd.lazyInit().Imports } -func (fd *File) Enums() pref.EnumDescriptors { return &fd.L1.Enums } -func (fd *File) Messages() pref.MessageDescriptors { return &fd.L1.Messages } -func (fd *File) Extensions() pref.ExtensionDescriptors { return &fd.L1.Extensions } -func (fd *File) Services() pref.ServiceDescriptors { return &fd.L1.Services } -func (fd *File) SourceLocations() pref.SourceLocations { return &fd.lazyInit().Locations } -func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } -func (fd *File) ProtoType(pref.FileDescriptor) {} -func (fd *File) ProtoInternal(pragma.DoNotImplement) {} +func (fd *File) Path() string { return fd.L1.Path } +func (fd *File) Package() protoreflect.FullName { return fd.L1.Package } +func (fd *File) Imports() protoreflect.FileImports { return &fd.lazyInit().Imports } +func (fd *File) Enums() protoreflect.EnumDescriptors { return &fd.L1.Enums } +func (fd *File) Messages() protoreflect.MessageDescriptors { return &fd.L1.Messages } +func (fd *File) Extensions() protoreflect.ExtensionDescriptors { return 
&fd.L1.Extensions } +func (fd *File) Services() protoreflect.ServiceDescriptors { return &fd.L1.Services } +func (fd *File) SourceLocations() protoreflect.SourceLocations { return &fd.lazyInit().Locations } +func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } +func (fd *File) ProtoType(protoreflect.FileDescriptor) {} +func (fd *File) ProtoInternal(pragma.DoNotImplement) {} func (fd *File) lazyInit() *FileL2 { if atomic.LoadUint32(&fd.once) == 0 { @@ -119,7 +119,7 @@ type ( eagerValues bool // controls whether EnumL2.Values is already populated } EnumL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Values EnumValues ReservedNames Names ReservedRanges EnumRanges @@ -130,41 +130,41 @@ type ( L1 EnumValueL1 } EnumValueL1 struct { - Options func() pref.ProtoMessage - Number pref.EnumNumber + Options func() protoreflect.ProtoMessage + Number protoreflect.EnumNumber } ) -func (ed *Enum) Options() pref.ProtoMessage { +func (ed *Enum) Options() protoreflect.ProtoMessage { if f := ed.lazyInit().Options; f != nil { return f() } return descopts.Enum } -func (ed *Enum) Values() pref.EnumValueDescriptors { +func (ed *Enum) Values() protoreflect.EnumValueDescriptors { if ed.L1.eagerValues { return &ed.L2.Values } return &ed.lazyInit().Values } -func (ed *Enum) ReservedNames() pref.Names { return &ed.lazyInit().ReservedNames } -func (ed *Enum) ReservedRanges() pref.EnumRanges { return &ed.lazyInit().ReservedRanges } -func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } -func (ed *Enum) ProtoType(pref.EnumDescriptor) {} +func (ed *Enum) ReservedNames() protoreflect.Names { return &ed.lazyInit().ReservedNames } +func (ed *Enum) ReservedRanges() protoreflect.EnumRanges { return &ed.lazyInit().ReservedRanges } +func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } +func (ed *Enum) ProtoType(protoreflect.EnumDescriptor) {} func (ed *Enum) lazyInit() *EnumL2 { ed.L0.ParentFile.lazyInit() // implicitly initializes L2 return ed.L2 } -func (ed *EnumValue) Options() pref.ProtoMessage { +func (ed *EnumValue) Options() protoreflect.ProtoMessage { if f := ed.L1.Options; f != nil { return f() } return descopts.EnumValue } -func (ed *EnumValue) Number() pref.EnumNumber { return ed.L1.Number } -func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } -func (ed *EnumValue) ProtoType(pref.EnumValueDescriptor) {} +func (ed *EnumValue) Number() protoreflect.EnumNumber { return ed.L1.Number } +func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } +func (ed *EnumValue) ProtoType(protoreflect.EnumValueDescriptor) {} type ( Message struct { @@ -180,14 +180,14 @@ type ( IsMessageSet bool // promoted from google.protobuf.MessageOptions } MessageL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Fields Fields Oneofs Oneofs ReservedNames Names ReservedRanges FieldRanges RequiredNumbers FieldNumbers // must be consistent with Fields.Cardinality ExtensionRanges FieldRanges - ExtensionRangeOptions []func() pref.ProtoMessage // must be same length as ExtensionRanges + ExtensionRangeOptions []func() protoreflect.ProtoMessage // must be same length as ExtensionRanges } Field struct { @@ -195,10 +195,10 @@ type ( L1 FieldL1 } FieldL1 struct { - Options func() pref.ProtoMessage - Number pref.FieldNumber - Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers - Kind pref.Kind + Options func() 
protoreflect.ProtoMessage + Number protoreflect.FieldNumber + Cardinality protoreflect.Cardinality // must be consistent with Message.RequiredNumbers + Kind protoreflect.Kind StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions @@ -207,9 +207,9 @@ type ( HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions EnforceUTF8 bool // promoted from google.protobuf.FieldOptions Default defaultValue - ContainingOneof pref.OneofDescriptor // must be consistent with Message.Oneofs.Fields - Enum pref.EnumDescriptor - Message pref.MessageDescriptor + ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields + Enum protoreflect.EnumDescriptor + Message protoreflect.MessageDescriptor } Oneof struct { @@ -217,35 +217,35 @@ type ( L1 OneofL1 } OneofL1 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Fields OneofFields // must be consistent with Message.Fields.ContainingOneof } ) -func (md *Message) Options() pref.ProtoMessage { +func (md *Message) Options() protoreflect.ProtoMessage { if f := md.lazyInit().Options; f != nil { return f() } return descopts.Message } -func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry } -func (md *Message) Fields() pref.FieldDescriptors { return &md.lazyInit().Fields } -func (md *Message) Oneofs() pref.OneofDescriptors { return &md.lazyInit().Oneofs } -func (md *Message) ReservedNames() pref.Names { return &md.lazyInit().ReservedNames } -func (md *Message) ReservedRanges() pref.FieldRanges { return &md.lazyInit().ReservedRanges } -func (md *Message) RequiredNumbers() pref.FieldNumbers { return &md.lazyInit().RequiredNumbers } -func (md *Message) ExtensionRanges() pref.FieldRanges { return &md.lazyInit().ExtensionRanges } -func (md *Message) ExtensionRangeOptions(i int) pref.ProtoMessage { +func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry } +func (md *Message) Fields() protoreflect.FieldDescriptors { return &md.lazyInit().Fields } +func (md *Message) Oneofs() protoreflect.OneofDescriptors { return &md.lazyInit().Oneofs } +func (md *Message) ReservedNames() protoreflect.Names { return &md.lazyInit().ReservedNames } +func (md *Message) ReservedRanges() protoreflect.FieldRanges { return &md.lazyInit().ReservedRanges } +func (md *Message) RequiredNumbers() protoreflect.FieldNumbers { return &md.lazyInit().RequiredNumbers } +func (md *Message) ExtensionRanges() protoreflect.FieldRanges { return &md.lazyInit().ExtensionRanges } +func (md *Message) ExtensionRangeOptions(i int) protoreflect.ProtoMessage { if f := md.lazyInit().ExtensionRangeOptions[i]; f != nil { return f() } return descopts.ExtensionRange } -func (md *Message) Enums() pref.EnumDescriptors { return &md.L1.Enums } -func (md *Message) Messages() pref.MessageDescriptors { return &md.L1.Messages } -func (md *Message) Extensions() pref.ExtensionDescriptors { return &md.L1.Extensions } -func (md *Message) ProtoType(pref.MessageDescriptor) {} -func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } +func (md *Message) Enums() protoreflect.EnumDescriptors { return &md.L1.Enums } +func (md *Message) Messages() protoreflect.MessageDescriptors { return &md.L1.Messages } +func (md *Message) Extensions() protoreflect.ExtensionDescriptors { return &md.L1.Extensions } +func (md *Message) ProtoType(protoreflect.MessageDescriptor) {} +func (md *Message) Format(s fmt.State, r rune) { 
descfmt.FormatDesc(s, r, md) } func (md *Message) lazyInit() *MessageL2 { md.L0.ParentFile.lazyInit() // implicitly initializes L2 return md.L2 @@ -260,28 +260,28 @@ func (md *Message) IsMessageSet() bool { return md.L1.IsMessageSet } -func (fd *Field) Options() pref.ProtoMessage { +func (fd *Field) Options() protoreflect.ProtoMessage { if f := fd.L1.Options; f != nil { return f() } return descopts.Field } -func (fd *Field) Number() pref.FieldNumber { return fd.L1.Number } -func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality } -func (fd *Field) Kind() pref.Kind { return fd.L1.Kind } -func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } -func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } -func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } +func (fd *Field) Number() protoreflect.FieldNumber { return fd.L1.Number } +func (fd *Field) Cardinality() protoreflect.Cardinality { return fd.L1.Cardinality } +func (fd *Field) Kind() protoreflect.Kind { return fd.L1.Kind } +func (fd *Field) HasJSONName() bool { return fd.L1.StringName.hasJSON } +func (fd *Field) JSONName() string { return fd.L1.StringName.getJSON(fd) } +func (fd *Field) TextName() string { return fd.L1.StringName.getText(fd) } func (fd *Field) HasPresence() bool { - return fd.L1.Cardinality != pref.Repeated && (fd.L0.ParentFile.L1.Syntax == pref.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) + return fd.L1.Cardinality != protoreflect.Repeated && (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) } func (fd *Field) HasOptionalKeyword() bool { - return (fd.L0.ParentFile.L1.Syntax == pref.Proto2 && fd.L1.Cardinality == pref.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional + return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional } func (fd *Field) IsPacked() bool { - if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != pref.Proto2 && fd.L1.Cardinality == pref.Repeated { + if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Repeated { switch fd.L1.Kind { - case pref.StringKind, pref.BytesKind, pref.MessageKind, pref.GroupKind: + case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind: default: return true } @@ -290,40 +290,40 @@ func (fd *Field) IsPacked() bool { } func (fd *Field) IsExtension() bool { return false } func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } -func (fd *Field) IsList() bool { return fd.Cardinality() == pref.Repeated && !fd.IsMap() } +func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } -func (fd *Field) MapKey() pref.FieldDescriptor { +func (fd *Field) MapKey() protoreflect.FieldDescriptor { if !fd.IsMap() { return nil } return fd.Message().Fields().ByNumber(genid.MapEntry_Key_field_number) } -func (fd *Field) MapValue() pref.FieldDescriptor { +func (fd *Field) MapValue() protoreflect.FieldDescriptor { if !fd.IsMap() { return nil } return fd.Message().Fields().ByNumber(genid.MapEntry_Value_field_number) } -func (fd *Field) HasDefault() bool { return fd.L1.Default.has } -func (fd *Field) Default() pref.Value { return fd.L1.Default.get(fd) } -func (fd *Field) DefaultEnumValue() 
pref.EnumValueDescriptor { return fd.L1.Default.enum } -func (fd *Field) ContainingOneof() pref.OneofDescriptor { return fd.L1.ContainingOneof } -func (fd *Field) ContainingMessage() pref.MessageDescriptor { - return fd.L0.Parent.(pref.MessageDescriptor) +func (fd *Field) HasDefault() bool { return fd.L1.Default.has } +func (fd *Field) Default() protoreflect.Value { return fd.L1.Default.get(fd) } +func (fd *Field) DefaultEnumValue() protoreflect.EnumValueDescriptor { return fd.L1.Default.enum } +func (fd *Field) ContainingOneof() protoreflect.OneofDescriptor { return fd.L1.ContainingOneof } +func (fd *Field) ContainingMessage() protoreflect.MessageDescriptor { + return fd.L0.Parent.(protoreflect.MessageDescriptor) } -func (fd *Field) Enum() pref.EnumDescriptor { +func (fd *Field) Enum() protoreflect.EnumDescriptor { return fd.L1.Enum } -func (fd *Field) Message() pref.MessageDescriptor { +func (fd *Field) Message() protoreflect.MessageDescriptor { if fd.L1.IsWeak { if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil { - return d.(pref.MessageDescriptor) + return d.(protoreflect.MessageDescriptor) } } return fd.L1.Message } -func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } -func (fd *Field) ProtoType(pref.FieldDescriptor) {} +func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } +func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {} // EnforceUTF8 is a pseudo-internal API to determine whether to enforce UTF-8 // validation for the string field. This exists for Google-internal use only @@ -336,21 +336,21 @@ func (fd *Field) EnforceUTF8() bool { if fd.L1.HasEnforceUTF8 { return fd.L1.EnforceUTF8 } - return fd.L0.ParentFile.L1.Syntax == pref.Proto3 + return fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 } func (od *Oneof) IsSynthetic() bool { - return od.L0.ParentFile.L1.Syntax == pref.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword() + return od.L0.ParentFile.L1.Syntax == protoreflect.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword() } -func (od *Oneof) Options() pref.ProtoMessage { +func (od *Oneof) Options() protoreflect.ProtoMessage { if f := od.L1.Options; f != nil { return f() } return descopts.Oneof } -func (od *Oneof) Fields() pref.FieldDescriptors { return &od.L1.Fields } -func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) } -func (od *Oneof) ProtoType(pref.OneofDescriptor) {} +func (od *Oneof) Fields() protoreflect.FieldDescriptors { return &od.L1.Fields } +func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) } +func (od *Oneof) ProtoType(protoreflect.OneofDescriptor) {} type ( Extension struct { @@ -359,55 +359,57 @@ type ( L2 *ExtensionL2 // protected by fileDesc.once } ExtensionL1 struct { - Number pref.FieldNumber - Extendee pref.MessageDescriptor - Cardinality pref.Cardinality - Kind pref.Kind + Number protoreflect.FieldNumber + Extendee protoreflect.MessageDescriptor + Cardinality protoreflect.Cardinality + Kind protoreflect.Kind } ExtensionL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsPacked bool // promoted from google.protobuf.FieldOptions Default defaultValue - Enum pref.EnumDescriptor - Message pref.MessageDescriptor + Enum protoreflect.EnumDescriptor + Message protoreflect.MessageDescriptor } ) -func (xd *Extension) 
Options() pref.ProtoMessage { +func (xd *Extension) Options() protoreflect.ProtoMessage { if f := xd.lazyInit().Options; f != nil { return f() } return descopts.Field } -func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number } -func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality } -func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind } -func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON } -func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) } -func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) } -func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != pref.Repeated } +func (xd *Extension) Number() protoreflect.FieldNumber { return xd.L1.Number } +func (xd *Extension) Cardinality() protoreflect.Cardinality { return xd.L1.Cardinality } +func (xd *Extension) Kind() protoreflect.Kind { return xd.L1.Kind } +func (xd *Extension) HasJSONName() bool { return xd.lazyInit().StringName.hasJSON } +func (xd *Extension) JSONName() string { return xd.lazyInit().StringName.getJSON(xd) } +func (xd *Extension) TextName() string { return xd.lazyInit().StringName.getText(xd) } +func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != protoreflect.Repeated } func (xd *Extension) HasOptionalKeyword() bool { - return (xd.L0.ParentFile.L1.Syntax == pref.Proto2 && xd.L1.Cardinality == pref.Optional) || xd.lazyInit().IsProto3Optional -} -func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } -func (xd *Extension) IsExtension() bool { return true } -func (xd *Extension) IsWeak() bool { return false } -func (xd *Extension) IsList() bool { return xd.Cardinality() == pref.Repeated } -func (xd *Extension) IsMap() bool { return false } -func (xd *Extension) MapKey() pref.FieldDescriptor { return nil } -func (xd *Extension) MapValue() pref.FieldDescriptor { return nil } -func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has } -func (xd *Extension) Default() pref.Value { return xd.lazyInit().Default.get(xd) } -func (xd *Extension) DefaultEnumValue() pref.EnumValueDescriptor { return xd.lazyInit().Default.enum } -func (xd *Extension) ContainingOneof() pref.OneofDescriptor { return nil } -func (xd *Extension) ContainingMessage() pref.MessageDescriptor { return xd.L1.Extendee } -func (xd *Extension) Enum() pref.EnumDescriptor { return xd.lazyInit().Enum } -func (xd *Extension) Message() pref.MessageDescriptor { return xd.lazyInit().Message } -func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) } -func (xd *Extension) ProtoType(pref.FieldDescriptor) {} -func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {} + return (xd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && xd.L1.Cardinality == protoreflect.Optional) || xd.lazyInit().IsProto3Optional +} +func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } +func (xd *Extension) IsExtension() bool { return true } +func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } +func (xd *Extension) IsMap() bool { return false } +func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil } +func (xd *Extension) MapValue() protoreflect.FieldDescriptor { return nil } +func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has } +func (xd *Extension) Default() protoreflect.Value { return xd.lazyInit().Default.get(xd) } +func (xd 
*Extension) DefaultEnumValue() protoreflect.EnumValueDescriptor { + return xd.lazyInit().Default.enum +} +func (xd *Extension) ContainingOneof() protoreflect.OneofDescriptor { return nil } +func (xd *Extension) ContainingMessage() protoreflect.MessageDescriptor { return xd.L1.Extendee } +func (xd *Extension) Enum() protoreflect.EnumDescriptor { return xd.lazyInit().Enum } +func (xd *Extension) Message() protoreflect.MessageDescriptor { return xd.lazyInit().Message } +func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) } +func (xd *Extension) ProtoType(protoreflect.FieldDescriptor) {} +func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {} func (xd *Extension) lazyInit() *ExtensionL2 { xd.L0.ParentFile.lazyInit() // implicitly initializes L2 return xd.L2 @@ -421,7 +423,7 @@ type ( } ServiceL1 struct{} ServiceL2 struct { - Options func() pref.ProtoMessage + Options func() protoreflect.ProtoMessage Methods Methods } @@ -430,48 +432,48 @@ type ( L1 MethodL1 } MethodL1 struct { - Options func() pref.ProtoMessage - Input pref.MessageDescriptor - Output pref.MessageDescriptor + Options func() protoreflect.ProtoMessage + Input protoreflect.MessageDescriptor + Output protoreflect.MessageDescriptor IsStreamingClient bool IsStreamingServer bool } ) -func (sd *Service) Options() pref.ProtoMessage { +func (sd *Service) Options() protoreflect.ProtoMessage { if f := sd.lazyInit().Options; f != nil { return f() } return descopts.Service } -func (sd *Service) Methods() pref.MethodDescriptors { return &sd.lazyInit().Methods } -func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) } -func (sd *Service) ProtoType(pref.ServiceDescriptor) {} -func (sd *Service) ProtoInternal(pragma.DoNotImplement) {} +func (sd *Service) Methods() protoreflect.MethodDescriptors { return &sd.lazyInit().Methods } +func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) } +func (sd *Service) ProtoType(protoreflect.ServiceDescriptor) {} +func (sd *Service) ProtoInternal(pragma.DoNotImplement) {} func (sd *Service) lazyInit() *ServiceL2 { sd.L0.ParentFile.lazyInit() // implicitly initializes L2 return sd.L2 } -func (md *Method) Options() pref.ProtoMessage { +func (md *Method) Options() protoreflect.ProtoMessage { if f := md.L1.Options; f != nil { return f() } return descopts.Method } -func (md *Method) Input() pref.MessageDescriptor { return md.L1.Input } -func (md *Method) Output() pref.MessageDescriptor { return md.L1.Output } -func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient } -func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer } -func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } -func (md *Method) ProtoType(pref.MethodDescriptor) {} -func (md *Method) ProtoInternal(pragma.DoNotImplement) {} +func (md *Method) Input() protoreflect.MessageDescriptor { return md.L1.Input } +func (md *Method) Output() protoreflect.MessageDescriptor { return md.L1.Output } +func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient } +func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer } +func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } +func (md *Method) ProtoType(protoreflect.MethodDescriptor) {} +func (md *Method) ProtoInternal(pragma.DoNotImplement) {} // Surrogate files can be used to create standalone descriptors // where the syntax is only information derived from the parent file.
var ( - SurrogateProto2 = &File{L1: FileL1{Syntax: pref.Proto2}, L2: &FileL2{}} - SurrogateProto3 = &File{L1: FileL1{Syntax: pref.Proto3}, L2: &FileL2{}} + SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}} + SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}} ) type ( @@ -479,24 +481,24 @@ type ( L0 BaseL0 } BaseL0 struct { - FullName pref.FullName // must be populated - ParentFile *File // must be populated - Parent pref.Descriptor + FullName protoreflect.FullName // must be populated + ParentFile *File // must be populated + Parent protoreflect.Descriptor Index int } ) -func (d *Base) Name() pref.Name { return d.L0.FullName.Name() } -func (d *Base) FullName() pref.FullName { return d.L0.FullName } -func (d *Base) ParentFile() pref.FileDescriptor { +func (d *Base) Name() protoreflect.Name { return d.L0.FullName.Name() } +func (d *Base) FullName() protoreflect.FullName { return d.L0.FullName } +func (d *Base) ParentFile() protoreflect.FileDescriptor { if d.L0.ParentFile == SurrogateProto2 || d.L0.ParentFile == SurrogateProto3 { return nil // surrogate files are not real parents } return d.L0.ParentFile } -func (d *Base) Parent() pref.Descriptor { return d.L0.Parent } +func (d *Base) Parent() protoreflect.Descriptor { return d.L0.Parent } func (d *Base) Index() int { return d.L0.Index } -func (d *Base) Syntax() pref.Syntax { return d.L0.ParentFile.Syntax() } +func (d *Base) Syntax() protoreflect.Syntax { return d.L0.ParentFile.Syntax() } func (d *Base) IsPlaceholder() bool { return false } func (d *Base) ProtoInternal(pragma.DoNotImplement) {} @@ -513,7 +515,7 @@ func (s *stringName) InitJSON(name string) { s.nameJSON = name } -func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { +func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName { s.once.Do(func() { if fd.IsExtension() { // For extensions, JSON and text are formatted the same way. @@ -533,7 +535,7 @@ func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { // Format the text name. 
s.nameText = string(fd.Name()) - if fd.Kind() == pref.GroupKind { + if fd.Kind() == protoreflect.GroupKind { s.nameText = string(fd.Message().Name()) } } @@ -541,10 +543,10 @@ func (s *stringName) lazyInit(fd pref.FieldDescriptor) *stringName { return s } -func (s *stringName) getJSON(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameJSON } -func (s *stringName) getText(fd pref.FieldDescriptor) string { return s.lazyInit(fd).nameText } +func (s *stringName) getJSON(fd protoreflect.FieldDescriptor) string { return s.lazyInit(fd).nameJSON } +func (s *stringName) getText(fd protoreflect.FieldDescriptor) string { return s.lazyInit(fd).nameText } -func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue { +func DefaultValue(v protoreflect.Value, ev protoreflect.EnumValueDescriptor) defaultValue { dv := defaultValue{has: v.IsValid(), val: v, enum: ev} if b, ok := v.Interface().([]byte); ok { // Store a copy of the default bytes, so that we can detect @@ -554,9 +556,9 @@ func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue { return dv } -func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) defaultValue { - var evs pref.EnumValueDescriptors - if k == pref.EnumKind { +func unmarshalDefault(b []byte, k protoreflect.Kind, pf *File, ed protoreflect.EnumDescriptor) defaultValue { + var evs protoreflect.EnumValueDescriptors + if k == protoreflect.EnumKind { // If the enum is declared within the same file, be careful not to // blindly call the Values method, lest we bind ourselves in a deadlock. if e, ok := ed.(*Enum); ok && e.L0.ParentFile == pf { @@ -567,9 +569,9 @@ func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) d // If we are unable to resolve the enum dependency, use a placeholder // enum value since we will not be able to parse the default value. - if ed.IsPlaceholder() && pref.Name(b).IsValid() { - v := pref.ValueOfEnum(0) - ev := PlaceholderEnumValue(ed.FullName().Parent().Append(pref.Name(b))) + if ed.IsPlaceholder() && protoreflect.Name(b).IsValid() { + v := protoreflect.ValueOfEnum(0) + ev := PlaceholderEnumValue(ed.FullName().Parent().Append(protoreflect.Name(b))) return DefaultValue(v, ev) } } @@ -583,41 +585,41 @@ func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) d type defaultValue struct { has bool - val pref.Value - enum pref.EnumValueDescriptor + val protoreflect.Value + enum protoreflect.EnumValueDescriptor bytes []byte } -func (dv *defaultValue) get(fd pref.FieldDescriptor) pref.Value { +func (dv *defaultValue) get(fd protoreflect.FieldDescriptor) protoreflect.Value { // Return the zero value as the default if unpopulated. 
if !dv.has { - if fd.Cardinality() == pref.Repeated { - return pref.Value{} + if fd.Cardinality() == protoreflect.Repeated { + return protoreflect.Value{} } switch fd.Kind() { - case pref.BoolKind: - return pref.ValueOfBool(false) - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: - return pref.ValueOfInt32(0) - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: - return pref.ValueOfInt64(0) - case pref.Uint32Kind, pref.Fixed32Kind: - return pref.ValueOfUint32(0) - case pref.Uint64Kind, pref.Fixed64Kind: - return pref.ValueOfUint64(0) - case pref.FloatKind: - return pref.ValueOfFloat32(0) - case pref.DoubleKind: - return pref.ValueOfFloat64(0) - case pref.StringKind: - return pref.ValueOfString("") - case pref.BytesKind: - return pref.ValueOfBytes(nil) - case pref.EnumKind: + case protoreflect.BoolKind: + return protoreflect.ValueOfBool(false) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return protoreflect.ValueOfInt32(0) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return protoreflect.ValueOfInt64(0) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return protoreflect.ValueOfUint32(0) + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return protoreflect.ValueOfUint64(0) + case protoreflect.FloatKind: + return protoreflect.ValueOfFloat32(0) + case protoreflect.DoubleKind: + return protoreflect.ValueOfFloat64(0) + case protoreflect.StringKind: + return protoreflect.ValueOfString("") + case protoreflect.BytesKind: + return protoreflect.ValueOfBytes(nil) + case protoreflect.EnumKind: if evs := fd.Enum().Values(); evs.Len() > 0 { - return pref.ValueOfEnum(evs.Get(0).Number()) + return protoreflect.ValueOfEnum(evs.Get(0).Number()) } - return pref.ValueOfEnum(0) + return protoreflect.ValueOfEnum(0) } } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 66e1fee52..4a1584c9d 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // fileRaw is a data struct used when initializing a file descriptor from @@ -95,7 +95,7 @@ func (fd *File) unmarshalSeed(b []byte) { sb := getBuilder() defer putBuilder(sb) - var prevField pref.FieldNumber + var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions, numServices int var posEnums, posMessages, posExtensions, posServices int b0 := b @@ -110,16 +110,16 @@ func (fd *File) unmarshalSeed(b []byte) { case genid.FileDescriptorProto_Syntax_field_number: switch string(v) { case "proto2": - fd.L1.Syntax = pref.Proto2 + fd.L1.Syntax = protoreflect.Proto2 case "proto3": - fd.L1.Syntax = pref.Proto3 + fd.L1.Syntax = protoreflect.Proto3 default: panic("invalid syntax") } case genid.FileDescriptorProto_Name_field_number: fd.L1.Path = sb.MakeString(v) case genid.FileDescriptorProto_Package_field_number: - fd.L1.Package = pref.FullName(sb.MakeString(v)) + fd.L1.Package = protoreflect.FullName(sb.MakeString(v)) case genid.FileDescriptorProto_EnumType_field_number: if prevField != genid.FileDescriptorProto_EnumType_field_number { if numEnums > 0 { @@ -163,7 +163,7 @@ func (fd *File) 
unmarshalSeed(b []byte) { // If syntax is missing, it is assumed to be proto2. if fd.L1.Syntax == 0 { - fd.L1.Syntax = pref.Proto2 + fd.L1.Syntax = protoreflect.Proto2 } // Must allocate all declarations before parsing each descriptor type @@ -219,7 +219,7 @@ func (fd *File) unmarshalSeed(b []byte) { } } -func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { ed.L0.ParentFile = pf ed.L0.Parent = pd ed.L0.Index = i @@ -271,12 +271,12 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Desc } } -func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { md.L0.ParentFile = pf md.L0.Parent = pd md.L0.Index = i - var prevField pref.FieldNumber + var prevField protoreflect.FieldNumber var numEnums, numMessages, numExtensions int var posEnums, posMessages, posExtensions int b0 := b @@ -387,7 +387,7 @@ func (md *Message) unmarshalSeedOptions(b []byte) { } } -func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { xd.L0.ParentFile = pf xd.L0.Parent = pd xd.L0.Index = i @@ -401,11 +401,11 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref b = b[m:] switch num { case genid.FieldDescriptorProto_Number_field_number: - xd.L1.Number = pref.FieldNumber(v) + xd.L1.Number = protoreflect.FieldNumber(v) case genid.FieldDescriptorProto_Label_field_number: - xd.L1.Cardinality = pref.Cardinality(v) + xd.L1.Cardinality = protoreflect.Cardinality(v) case genid.FieldDescriptorProto_Type_field_number: - xd.L1.Kind = pref.Kind(v) + xd.L1.Kind = protoreflect.Kind(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -423,7 +423,7 @@ func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref } } -func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { sd.L0.ParentFile = pf sd.L0.Parent = pd sd.L0.Index = i @@ -459,13 +459,13 @@ func putBuilder(b *strs.Builder) { // makeFullName converts b to a protoreflect.FullName, // where b must start with a leading dot. -func makeFullName(sb *strs.Builder, b []byte) pref.FullName { +func makeFullName(sb *strs.Builder, b []byte) protoreflect.FullName { if len(b) == 0 || b[0] != '.' 
{ panic("name reference must be fully qualified") } - return pref.FullName(sb.MakeString(b[1:])) + return protoreflect.FullName(sb.MakeString(b[1:])) } -func appendFullName(sb *strs.Builder, prefix pref.FullName, suffix []byte) pref.FullName { - return sb.AppendFullName(prefix, pref.Name(strs.UnsafeString(suffix))) +func appendFullName(sb *strs.Builder, prefix protoreflect.FullName, suffix []byte) protoreflect.FullName { + return sb.AppendFullName(prefix, protoreflect.Name(strs.UnsafeString(suffix))) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 198451e3e..736a19a75 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -13,7 +13,7 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) func (fd *File) lazyRawInit() { @@ -39,10 +39,10 @@ func (file *File) resolveMessages() { // Resolve message field dependency. switch fd.L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: fd.L1.Enum = file.resolveEnumDependency(fd.L1.Enum, listFieldDeps, depIdx) depIdx++ - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) depIdx++ } @@ -62,10 +62,10 @@ func (file *File) resolveExtensions() { // Resolve extension field dependency. switch xd.L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: xd.L2.Enum = file.resolveEnumDependency(xd.L2.Enum, listExtDeps, depIdx) depIdx++ - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: xd.L2.Message = file.resolveMessageDependency(xd.L2.Message, listExtDeps, depIdx) depIdx++ } @@ -92,7 +92,7 @@ func (file *File) resolveServices() { } } -func (file *File) resolveEnumDependency(ed pref.EnumDescriptor, i, j int32) pref.EnumDescriptor { +func (file *File) resolveEnumDependency(ed protoreflect.EnumDescriptor, i, j int32) protoreflect.EnumDescriptor { r := file.builder.FileRegistry if r, ok := r.(resolverByIndex); ok { if ed2 := r.FindEnumByIndex(i, j, file.allEnums, file.allMessages); ed2 != nil { @@ -105,12 +105,12 @@ func (file *File) resolveEnumDependency(ed pref.EnumDescriptor, i, j int32) pref } } if d, _ := r.FindDescriptorByName(ed.FullName()); d != nil { - return d.(pref.EnumDescriptor) + return d.(protoreflect.EnumDescriptor) } return ed } -func (file *File) resolveMessageDependency(md pref.MessageDescriptor, i, j int32) pref.MessageDescriptor { +func (file *File) resolveMessageDependency(md protoreflect.MessageDescriptor, i, j int32) protoreflect.MessageDescriptor { r := file.builder.FileRegistry if r, ok := r.(resolverByIndex); ok { if md2 := r.FindMessageByIndex(i, j, file.allEnums, file.allMessages); md2 != nil { @@ -123,7 +123,7 @@ func (file *File) resolveMessageDependency(md pref.MessageDescriptor, i, j int32 } } if d, _ := r.FindDescriptorByName(md.FullName()); d != nil { - return d.(pref.MessageDescriptor) + return d.(protoreflect.MessageDescriptor) } return md } @@ -158,7 +158,7 @@ func (fd *File) unmarshalFull(b []byte) { if imp == nil { imp = PlaceholderFile(path) } - fd.L2.Imports = append(fd.L2.Imports, pref.FileImport{FileDescriptor: imp}) + fd.L2.Imports = 
append(fd.L2.Imports, protoreflect.FileImport{FileDescriptor: imp}) case genid.FileDescriptorProto_EnumType_field_number: fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb) enumIdx++ @@ -199,7 +199,7 @@ func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { case genid.EnumDescriptorProto_Value_field_number: rawValues = append(rawValues, v) case genid.EnumDescriptorProto_ReservedName_field_number: - ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) + ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, protoreflect.Name(sb.MakeString(v))) case genid.EnumDescriptorProto_ReservedRange_field_number: ed.L2.ReservedRanges.List = append(ed.L2.ReservedRanges.List, unmarshalEnumReservedRange(v)) case genid.EnumDescriptorProto_Options_field_number: @@ -219,7 +219,7 @@ func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { ed.L2.Options = ed.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Enum, rawOptions) } -func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { +func unmarshalEnumReservedRange(b []byte) (r [2]protoreflect.EnumNumber) { for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) b = b[n:] @@ -229,9 +229,9 @@ func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { b = b[m:] switch num { case genid.EnumDescriptorProto_EnumReservedRange_Start_field_number: - r[0] = pref.EnumNumber(v) + r[0] = protoreflect.EnumNumber(v) case genid.EnumDescriptorProto_EnumReservedRange_End_field_number: - r[1] = pref.EnumNumber(v) + r[1] = protoreflect.EnumNumber(v) } default: m := protowire.ConsumeFieldValue(num, typ, b) @@ -241,7 +241,7 @@ func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { return r } -func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { vd.L0.ParentFile = pf vd.L0.Parent = pd vd.L0.Index = i @@ -256,7 +256,7 @@ func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref b = b[m:] switch num { case genid.EnumValueDescriptorProto_Number_field_number: - vd.L1.Number = pref.EnumNumber(v) + vd.L1.Number = protoreflect.EnumNumber(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -294,7 +294,7 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { case genid.DescriptorProto_OneofDecl_field_number: rawOneofs = append(rawOneofs, v) case genid.DescriptorProto_ReservedName_field_number: - md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) + md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, protoreflect.Name(sb.MakeString(v))) case genid.DescriptorProto_ReservedRange_field_number: md.L2.ReservedRanges.List = append(md.L2.ReservedRanges.List, unmarshalMessageReservedRange(v)) case genid.DescriptorProto_ExtensionRange_field_number: @@ -326,7 +326,7 @@ func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { for i, b := range rawFields { fd := &md.L2.Fields.List[i] fd.unmarshalFull(b, sb, md.L0.ParentFile, md, i) - if fd.L1.Cardinality == pref.Required { + if fd.L1.Cardinality == protoreflect.Required { md.L2.RequiredNumbers.List = append(md.L2.RequiredNumbers.List, fd.L1.Number) } } @@ -359,7 +359,7 @@ func (md *Message) unmarshalOptions(b []byte) { } } -func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { +func unmarshalMessageReservedRange(b []byte) (r [2]protoreflect.FieldNumber) { for len(b) > 0 { num, typ, n := 
protowire.ConsumeTag(b) b = b[n:] @@ -369,9 +369,9 @@ func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { b = b[m:] switch num { case genid.DescriptorProto_ReservedRange_Start_field_number: - r[0] = pref.FieldNumber(v) + r[0] = protoreflect.FieldNumber(v) case genid.DescriptorProto_ReservedRange_End_field_number: - r[1] = pref.FieldNumber(v) + r[1] = protoreflect.FieldNumber(v) } default: m := protowire.ConsumeFieldValue(num, typ, b) @@ -381,7 +381,7 @@ func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { return r } -func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions []byte) { +func unmarshalMessageExtensionRange(b []byte) (r [2]protoreflect.FieldNumber, rawOptions []byte) { for len(b) > 0 { num, typ, n := protowire.ConsumeTag(b) b = b[n:] @@ -391,9 +391,9 @@ func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions b = b[m:] switch num { case genid.DescriptorProto_ExtensionRange_Start_field_number: - r[0] = pref.FieldNumber(v) + r[0] = protoreflect.FieldNumber(v) case genid.DescriptorProto_ExtensionRange_End_field_number: - r[1] = pref.FieldNumber(v) + r[1] = protoreflect.FieldNumber(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -410,7 +410,7 @@ func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions return r, rawOptions } -func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { fd.L0.ParentFile = pf fd.L0.Parent = pd fd.L0.Index = i @@ -426,11 +426,11 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des b = b[m:] switch num { case genid.FieldDescriptorProto_Number_field_number: - fd.L1.Number = pref.FieldNumber(v) + fd.L1.Number = protoreflect.FieldNumber(v) case genid.FieldDescriptorProto_Label_field_number: - fd.L1.Cardinality = pref.Cardinality(v) + fd.L1.Cardinality = protoreflect.Cardinality(v) case genid.FieldDescriptorProto_Type_field_number: - fd.L1.Kind = pref.Kind(v) + fd.L1.Kind = protoreflect.Kind(v) case genid.FieldDescriptorProto_OneofIndex_field_number: // In Message.unmarshalFull, we allocate slices for both // the field and oneof descriptors before unmarshaling either @@ -453,7 +453,7 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des case genid.FieldDescriptorProto_JsonName_field_number: fd.L1.StringName.InitJSON(sb.MakeString(v)) case genid.FieldDescriptorProto_DefaultValue_field_number: - fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages + fd.L1.Default.val = protoreflect.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v case genid.FieldDescriptorProto_Options_field_number: @@ -468,9 +468,9 @@ func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Des if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch fd.L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: fd.L1.Enum = PlaceholderEnum(name) - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: fd.L1.Message = PlaceholderMessage(name) } } @@ -504,7 +504,7 @@ func (fd *Field) unmarshalOptions(b []byte) { } } -func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (od *Oneof) 
unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { od.L0.ParentFile = pf od.L0.Parent = pd od.L0.Index = i @@ -553,7 +553,7 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { case genid.FieldDescriptorProto_JsonName_field_number: xd.L2.StringName.InitJSON(sb.MakeString(v)) case genid.FieldDescriptorProto_DefaultValue_field_number: - xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions + xd.L2.Default.val = protoreflect.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions case genid.FieldDescriptorProto_TypeName_field_number: rawTypeName = v case genid.FieldDescriptorProto_Options_field_number: @@ -568,9 +568,9 @@ func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { if rawTypeName != nil { name := makeFullName(sb, rawTypeName) switch xd.L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: xd.L2.Enum = PlaceholderEnum(name) - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: xd.L2.Message = PlaceholderMessage(name) } } @@ -627,7 +627,7 @@ func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { sd.L2.Options = sd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Service, rawOptions) } -func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { +func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd protoreflect.Descriptor, i int) { md.L0.ParentFile = pf md.L0.Parent = pd md.L0.Index = i @@ -680,18 +680,18 @@ func appendOptions(dst, src []byte) []byte { // // The type of message to unmarshal to is passed as a pointer since the // vars in descopts may not yet be populated at the time this function is called. 
-func (db *Builder) optionsUnmarshaler(p *pref.ProtoMessage, b []byte) func() pref.ProtoMessage { +func (db *Builder) optionsUnmarshaler(p *protoreflect.ProtoMessage, b []byte) func() protoreflect.ProtoMessage { if b == nil { return nil } - var opts pref.ProtoMessage + var opts protoreflect.ProtoMessage var once sync.Once - return func() pref.ProtoMessage { + return func() protoreflect.ProtoMessage { once.Do(func() { if *p == nil { panic("Descriptor.Options called without importing the descriptor package") } - opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(pref.ProtoMessage) + opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(protoreflect.ProtoMessage) if err := (proto.UnmarshalOptions{ AllowPartial: true, Resolver: db.TypeResolver, diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go index aa294fff9..e3b6587da 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go @@ -17,31 +17,30 @@ import ( "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" ) -type FileImports []pref.FileImport +type FileImports []protoreflect.FileImport func (p *FileImports) Len() int { return len(*p) } -func (p *FileImports) Get(i int) pref.FileImport { return (*p)[i] } +func (p *FileImports) Get(i int) protoreflect.FileImport { return (*p)[i] } func (p *FileImports) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } func (p *FileImports) ProtoInternal(pragma.DoNotImplement) {} type Names struct { - List []pref.Name + List []protoreflect.Name once sync.Once - has map[pref.Name]int // protected by once + has map[protoreflect.Name]int // protected by once } func (p *Names) Len() int { return len(p.List) } -func (p *Names) Get(i int) pref.Name { return p.List[i] } -func (p *Names) Has(s pref.Name) bool { return p.lazyInit().has[s] > 0 } +func (p *Names) Get(i int) protoreflect.Name { return p.List[i] } +func (p *Names) Has(s protoreflect.Name) bool { return p.lazyInit().has[s] > 0 } func (p *Names) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } func (p *Names) ProtoInternal(pragma.DoNotImplement) {} func (p *Names) lazyInit() *Names { p.once.Do(func() { if len(p.List) > 0 { - p.has = make(map[pref.Name]int, len(p.List)) + p.has = make(map[protoreflect.Name]int, len(p.List)) for _, s := range p.List { p.has[s] = p.has[s] + 1 } @@ -67,14 +66,14 @@ func (p *Names) CheckValid() error { } type EnumRanges struct { - List [][2]pref.EnumNumber // start inclusive; end inclusive + List [][2]protoreflect.EnumNumber // start inclusive; end inclusive once sync.Once - sorted [][2]pref.EnumNumber // protected by once + sorted [][2]protoreflect.EnumNumber // protected by once } -func (p *EnumRanges) Len() int { return len(p.List) } -func (p *EnumRanges) Get(i int) [2]pref.EnumNumber { return p.List[i] } -func (p *EnumRanges) Has(n pref.EnumNumber) bool { +func (p *EnumRanges) Len() int { return len(p.List) } +func (p *EnumRanges) Get(i int) [2]protoreflect.EnumNumber { return p.List[i] } +func (p *EnumRanges) Has(n protoreflect.EnumNumber) bool { for ls := p.lazyInit().sorted; len(ls) > 0; { i := len(ls) / 2 switch r := enumRange(ls[i]); { @@ -129,14 +128,14 @@ func (r enumRange) String() string { } type FieldRanges struct { - List [][2]pref.FieldNumber // start 
inclusive; end exclusive + List [][2]protoreflect.FieldNumber // start inclusive; end exclusive once sync.Once - sorted [][2]pref.FieldNumber // protected by once + sorted [][2]protoreflect.FieldNumber // protected by once } -func (p *FieldRanges) Len() int { return len(p.List) } -func (p *FieldRanges) Get(i int) [2]pref.FieldNumber { return p.List[i] } -func (p *FieldRanges) Has(n pref.FieldNumber) bool { +func (p *FieldRanges) Len() int { return len(p.List) } +func (p *FieldRanges) Get(i int) [2]protoreflect.FieldNumber { return p.List[i] } +func (p *FieldRanges) Has(n protoreflect.FieldNumber) bool { for ls := p.lazyInit().sorted; len(ls) > 0; { i := len(ls) / 2 switch r := fieldRange(ls[i]); { @@ -221,17 +220,17 @@ func (r fieldRange) String() string { } type FieldNumbers struct { - List []pref.FieldNumber + List []protoreflect.FieldNumber once sync.Once - has map[pref.FieldNumber]struct{} // protected by once + has map[protoreflect.FieldNumber]struct{} // protected by once } -func (p *FieldNumbers) Len() int { return len(p.List) } -func (p *FieldNumbers) Get(i int) pref.FieldNumber { return p.List[i] } -func (p *FieldNumbers) Has(n pref.FieldNumber) bool { +func (p *FieldNumbers) Len() int { return len(p.List) } +func (p *FieldNumbers) Get(i int) protoreflect.FieldNumber { return p.List[i] } +func (p *FieldNumbers) Has(n protoreflect.FieldNumber) bool { p.once.Do(func() { if len(p.List) > 0 { - p.has = make(map[pref.FieldNumber]struct{}, len(p.List)) + p.has = make(map[protoreflect.FieldNumber]struct{}, len(p.List)) for _, n := range p.List { p.has[n] = struct{}{} } @@ -244,30 +243,38 @@ func (p *FieldNumbers) Format(s fmt.State, r rune) { descfmt.FormatList func (p *FieldNumbers) ProtoInternal(pragma.DoNotImplement) {} type OneofFields struct { - List []pref.FieldDescriptor + List []protoreflect.FieldDescriptor once sync.Once - byName map[pref.Name]pref.FieldDescriptor // protected by once - byJSON map[string]pref.FieldDescriptor // protected by once - byText map[string]pref.FieldDescriptor // protected by once - byNum map[pref.FieldNumber]pref.FieldDescriptor // protected by once + byName map[protoreflect.Name]protoreflect.FieldDescriptor // protected by once + byJSON map[string]protoreflect.FieldDescriptor // protected by once + byText map[string]protoreflect.FieldDescriptor // protected by once + byNum map[protoreflect.FieldNumber]protoreflect.FieldDescriptor // protected by once } -func (p *OneofFields) Len() int { return len(p.List) } -func (p *OneofFields) Get(i int) pref.FieldDescriptor { return p.List[i] } -func (p *OneofFields) ByName(s pref.Name) pref.FieldDescriptor { return p.lazyInit().byName[s] } -func (p *OneofFields) ByJSONName(s string) pref.FieldDescriptor { return p.lazyInit().byJSON[s] } -func (p *OneofFields) ByTextName(s string) pref.FieldDescriptor { return p.lazyInit().byText[s] } -func (p *OneofFields) ByNumber(n pref.FieldNumber) pref.FieldDescriptor { return p.lazyInit().byNum[n] } -func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } -func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {} +func (p *OneofFields) Len() int { return len(p.List) } +func (p *OneofFields) Get(i int) protoreflect.FieldDescriptor { return p.List[i] } +func (p *OneofFields) ByName(s protoreflect.Name) protoreflect.FieldDescriptor { + return p.lazyInit().byName[s] +} +func (p *OneofFields) ByJSONName(s string) protoreflect.FieldDescriptor { + return p.lazyInit().byJSON[s] +} +func (p *OneofFields) ByTextName(s string) protoreflect.FieldDescriptor 
{ + return p.lazyInit().byText[s] +} +func (p *OneofFields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { + return p.lazyInit().byNum[n] +} +func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {} func (p *OneofFields) lazyInit() *OneofFields { p.once.Do(func() { if len(p.List) > 0 { - p.byName = make(map[pref.Name]pref.FieldDescriptor, len(p.List)) - p.byJSON = make(map[string]pref.FieldDescriptor, len(p.List)) - p.byText = make(map[string]pref.FieldDescriptor, len(p.List)) - p.byNum = make(map[pref.FieldNumber]pref.FieldDescriptor, len(p.List)) + p.byName = make(map[protoreflect.Name]protoreflect.FieldDescriptor, len(p.List)) + p.byJSON = make(map[string]protoreflect.FieldDescriptor, len(p.List)) + p.byText = make(map[string]protoreflect.FieldDescriptor, len(p.List)) + p.byNum = make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor, len(p.List)) for _, f := range p.List { // Field names and numbers are guaranteed to be unique. p.byName[f.Name()] = f @@ -284,123 +291,123 @@ type SourceLocations struct { // List is a list of SourceLocations. // The SourceLocation.Next field does not need to be populated // as it will be lazily populated upon first need. - List []pref.SourceLocation + List []protoreflect.SourceLocation // File is the parent file descriptor that these locations are relative to. // If non-nil, ByDescriptor verifies that the provided descriptor // is a child of this file descriptor. - File pref.FileDescriptor + File protoreflect.FileDescriptor once sync.Once byPath map[pathKey]int } -func (p *SourceLocations) Len() int { return len(p.List) } -func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.lazyInit().List[i] } -func (p *SourceLocations) byKey(k pathKey) pref.SourceLocation { +func (p *SourceLocations) Len() int { return len(p.List) } +func (p *SourceLocations) Get(i int) protoreflect.SourceLocation { return p.lazyInit().List[i] } +func (p *SourceLocations) byKey(k pathKey) protoreflect.SourceLocation { if i, ok := p.lazyInit().byPath[k]; ok { return p.List[i] } - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } -func (p *SourceLocations) ByPath(path pref.SourcePath) pref.SourceLocation { +func (p *SourceLocations) ByPath(path protoreflect.SourcePath) protoreflect.SourceLocation { return p.byKey(newPathKey(path)) } -func (p *SourceLocations) ByDescriptor(desc pref.Descriptor) pref.SourceLocation { +func (p *SourceLocations) ByDescriptor(desc protoreflect.Descriptor) protoreflect.SourceLocation { if p.File != nil && desc != nil && p.File != desc.ParentFile() { - return pref.SourceLocation{} // mismatching parent files + return protoreflect.SourceLocation{} // mismatching parent files } var pathArr [16]int32 path := pathArr[:0] for { switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: // Reverse the path since it was constructed in reverse. 
for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 { path[i], path[j] = path[j], path[i] } return p.byKey(newPathKey(path)) - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: path = append(path, int32(genid.FileDescriptorProto_MessageType_field_number)) - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_NestedType_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.FieldDescriptor: - isExtension := desc.(pref.FieldDescriptor).IsExtension() + case protoreflect.FieldDescriptor: + isExtension := desc.(protoreflect.FieldDescriptor).IsExtension() path = append(path, int32(desc.Index())) desc = desc.Parent() if isExtension { switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: path = append(path, int32(genid.FileDescriptorProto_Extension_field_number)) - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_Extension_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } } else { switch desc.(type) { - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_Field_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } } - case pref.OneofDescriptor: + case protoreflect.OneofDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_OneofDecl_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.EnumDescriptor: + case protoreflect.EnumDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: path = append(path, int32(genid.FileDescriptorProto_EnumType_field_number)) - case pref.MessageDescriptor: + case protoreflect.MessageDescriptor: path = append(path, int32(genid.DescriptorProto_EnumType_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.EnumValueDescriptor: + case protoreflect.EnumValueDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.EnumDescriptor: + case protoreflect.EnumDescriptor: path = append(path, int32(genid.EnumDescriptorProto_Value_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.ServiceDescriptor: + case protoreflect.ServiceDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.FileDescriptor: + case protoreflect.FileDescriptor: path = append(path, int32(genid.FileDescriptorProto_Service_field_number)) default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } - case pref.MethodDescriptor: + case protoreflect.MethodDescriptor: path = append(path, int32(desc.Index())) desc = desc.Parent() switch desc.(type) { - case pref.ServiceDescriptor: + case protoreflect.ServiceDescriptor: path = append(path, int32(genid.ServiceDescriptorProto_Method_field_number)) default: - return pref.SourceLocation{} + return 
protoreflect.SourceLocation{} } default: - return pref.SourceLocation{} + return protoreflect.SourceLocation{} } } } @@ -435,7 +442,7 @@ type pathKey struct { str string // used if the path does not fit in arr } -func newPathKey(p pref.SourcePath) (k pathKey) { +func newPathKey(p protoreflect.SourcePath) (k pathKey) { if len(p) < len(k.arr) { for i, ps := range p { if ps < 0 || math.MaxUint8 <= ps { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go index dbf2c605b..28240ebc5 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go @@ -7,7 +7,7 @@ package filedesc import ( "google.golang.org/protobuf/internal/descopts" "google.golang.org/protobuf/internal/pragma" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) var ( @@ -30,78 +30,80 @@ var ( // PlaceholderFile is a placeholder, representing only the file path. type PlaceholderFile string -func (f PlaceholderFile) ParentFile() pref.FileDescriptor { return f } -func (f PlaceholderFile) Parent() pref.Descriptor { return nil } -func (f PlaceholderFile) Index() int { return 0 } -func (f PlaceholderFile) Syntax() pref.Syntax { return 0 } -func (f PlaceholderFile) Name() pref.Name { return "" } -func (f PlaceholderFile) FullName() pref.FullName { return "" } -func (f PlaceholderFile) IsPlaceholder() bool { return true } -func (f PlaceholderFile) Options() pref.ProtoMessage { return descopts.File } -func (f PlaceholderFile) Path() string { return string(f) } -func (f PlaceholderFile) Package() pref.FullName { return "" } -func (f PlaceholderFile) Imports() pref.FileImports { return emptyFiles } -func (f PlaceholderFile) Messages() pref.MessageDescriptors { return emptyMessages } -func (f PlaceholderFile) Enums() pref.EnumDescriptors { return emptyEnums } -func (f PlaceholderFile) Extensions() pref.ExtensionDescriptors { return emptyExtensions } -func (f PlaceholderFile) Services() pref.ServiceDescriptors { return emptyServices } -func (f PlaceholderFile) SourceLocations() pref.SourceLocations { return emptySourceLocations } -func (f PlaceholderFile) ProtoType(pref.FileDescriptor) { return } -func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return } +func (f PlaceholderFile) ParentFile() protoreflect.FileDescriptor { return f } +func (f PlaceholderFile) Parent() protoreflect.Descriptor { return nil } +func (f PlaceholderFile) Index() int { return 0 } +func (f PlaceholderFile) Syntax() protoreflect.Syntax { return 0 } +func (f PlaceholderFile) Name() protoreflect.Name { return "" } +func (f PlaceholderFile) FullName() protoreflect.FullName { return "" } +func (f PlaceholderFile) IsPlaceholder() bool { return true } +func (f PlaceholderFile) Options() protoreflect.ProtoMessage { return descopts.File } +func (f PlaceholderFile) Path() string { return string(f) } +func (f PlaceholderFile) Package() protoreflect.FullName { return "" } +func (f PlaceholderFile) Imports() protoreflect.FileImports { return emptyFiles } +func (f PlaceholderFile) Messages() protoreflect.MessageDescriptors { return emptyMessages } +func (f PlaceholderFile) Enums() protoreflect.EnumDescriptors { return emptyEnums } +func (f PlaceholderFile) Extensions() protoreflect.ExtensionDescriptors { return emptyExtensions } +func (f PlaceholderFile) Services() protoreflect.ServiceDescriptors { return emptyServices } 
+func (f PlaceholderFile) SourceLocations() protoreflect.SourceLocations { return emptySourceLocations } +func (f PlaceholderFile) ProtoType(protoreflect.FileDescriptor) { return } +func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return } // PlaceholderEnum is a placeholder, representing only the full name. -type PlaceholderEnum pref.FullName +type PlaceholderEnum protoreflect.FullName -func (e PlaceholderEnum) ParentFile() pref.FileDescriptor { return nil } -func (e PlaceholderEnum) Parent() pref.Descriptor { return nil } -func (e PlaceholderEnum) Index() int { return 0 } -func (e PlaceholderEnum) Syntax() pref.Syntax { return 0 } -func (e PlaceholderEnum) Name() pref.Name { return pref.FullName(e).Name() } -func (e PlaceholderEnum) FullName() pref.FullName { return pref.FullName(e) } -func (e PlaceholderEnum) IsPlaceholder() bool { return true } -func (e PlaceholderEnum) Options() pref.ProtoMessage { return descopts.Enum } -func (e PlaceholderEnum) Values() pref.EnumValueDescriptors { return emptyEnumValues } -func (e PlaceholderEnum) ReservedNames() pref.Names { return emptyNames } -func (e PlaceholderEnum) ReservedRanges() pref.EnumRanges { return emptyEnumRanges } -func (e PlaceholderEnum) ProtoType(pref.EnumDescriptor) { return } -func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } +func (e PlaceholderEnum) ParentFile() protoreflect.FileDescriptor { return nil } +func (e PlaceholderEnum) Parent() protoreflect.Descriptor { return nil } +func (e PlaceholderEnum) Index() int { return 0 } +func (e PlaceholderEnum) Syntax() protoreflect.Syntax { return 0 } +func (e PlaceholderEnum) Name() protoreflect.Name { return protoreflect.FullName(e).Name() } +func (e PlaceholderEnum) FullName() protoreflect.FullName { return protoreflect.FullName(e) } +func (e PlaceholderEnum) IsPlaceholder() bool { return true } +func (e PlaceholderEnum) Options() protoreflect.ProtoMessage { return descopts.Enum } +func (e PlaceholderEnum) Values() protoreflect.EnumValueDescriptors { return emptyEnumValues } +func (e PlaceholderEnum) ReservedNames() protoreflect.Names { return emptyNames } +func (e PlaceholderEnum) ReservedRanges() protoreflect.EnumRanges { return emptyEnumRanges } +func (e PlaceholderEnum) ProtoType(protoreflect.EnumDescriptor) { return } +func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } // PlaceholderEnumValue is a placeholder, representing only the full name. 
-type PlaceholderEnumValue pref.FullName +type PlaceholderEnumValue protoreflect.FullName -func (e PlaceholderEnumValue) ParentFile() pref.FileDescriptor { return nil } -func (e PlaceholderEnumValue) Parent() pref.Descriptor { return nil } -func (e PlaceholderEnumValue) Index() int { return 0 } -func (e PlaceholderEnumValue) Syntax() pref.Syntax { return 0 } -func (e PlaceholderEnumValue) Name() pref.Name { return pref.FullName(e).Name() } -func (e PlaceholderEnumValue) FullName() pref.FullName { return pref.FullName(e) } -func (e PlaceholderEnumValue) IsPlaceholder() bool { return true } -func (e PlaceholderEnumValue) Options() pref.ProtoMessage { return descopts.EnumValue } -func (e PlaceholderEnumValue) Number() pref.EnumNumber { return 0 } -func (e PlaceholderEnumValue) ProtoType(pref.EnumValueDescriptor) { return } -func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return } +func (e PlaceholderEnumValue) ParentFile() protoreflect.FileDescriptor { return nil } +func (e PlaceholderEnumValue) Parent() protoreflect.Descriptor { return nil } +func (e PlaceholderEnumValue) Index() int { return 0 } +func (e PlaceholderEnumValue) Syntax() protoreflect.Syntax { return 0 } +func (e PlaceholderEnumValue) Name() protoreflect.Name { return protoreflect.FullName(e).Name() } +func (e PlaceholderEnumValue) FullName() protoreflect.FullName { return protoreflect.FullName(e) } +func (e PlaceholderEnumValue) IsPlaceholder() bool { return true } +func (e PlaceholderEnumValue) Options() protoreflect.ProtoMessage { return descopts.EnumValue } +func (e PlaceholderEnumValue) Number() protoreflect.EnumNumber { return 0 } +func (e PlaceholderEnumValue) ProtoType(protoreflect.EnumValueDescriptor) { return } +func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return } // PlaceholderMessage is a placeholder, representing only the full name. 
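The Placeholder types in this file all follow one pattern: a named string type that satisfies a descriptor interface with inert, zero-value answers, remembering nothing but the full name it was created from. A minimal standalone sketch of that pattern, using a hypothetical two-method interface in place of the much larger protoreflect descriptor interfaces:

package main

import "fmt"

// descriptor is a hypothetical, drastically reduced stand-in for the
// protoreflect descriptor interfaces implemented by the placeholders above.
type descriptor interface {
	FullName() string
	IsPlaceholder() bool
}

// placeholderEnum mirrors filedesc.PlaceholderEnum: the value is the name.
type placeholderEnum string

func (e placeholderEnum) FullName() string    { return string(e) }
func (e placeholderEnum) IsPlaceholder() bool { return true }

func main() {
	var d descriptor = placeholderEnum("example.v1.Color")
	fmt.Println(d.FullName(), d.IsPlaceholder()) // example.v1.Color true
}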
-type PlaceholderMessage pref.FullName +type PlaceholderMessage protoreflect.FullName -func (m PlaceholderMessage) ParentFile() pref.FileDescriptor { return nil } -func (m PlaceholderMessage) Parent() pref.Descriptor { return nil } -func (m PlaceholderMessage) Index() int { return 0 } -func (m PlaceholderMessage) Syntax() pref.Syntax { return 0 } -func (m PlaceholderMessage) Name() pref.Name { return pref.FullName(m).Name() } -func (m PlaceholderMessage) FullName() pref.FullName { return pref.FullName(m) } -func (m PlaceholderMessage) IsPlaceholder() bool { return true } -func (m PlaceholderMessage) Options() pref.ProtoMessage { return descopts.Message } -func (m PlaceholderMessage) IsMapEntry() bool { return false } -func (m PlaceholderMessage) Fields() pref.FieldDescriptors { return emptyFields } -func (m PlaceholderMessage) Oneofs() pref.OneofDescriptors { return emptyOneofs } -func (m PlaceholderMessage) ReservedNames() pref.Names { return emptyNames } -func (m PlaceholderMessage) ReservedRanges() pref.FieldRanges { return emptyFieldRanges } -func (m PlaceholderMessage) RequiredNumbers() pref.FieldNumbers { return emptyFieldNumbers } -func (m PlaceholderMessage) ExtensionRanges() pref.FieldRanges { return emptyFieldRanges } -func (m PlaceholderMessage) ExtensionRangeOptions(int) pref.ProtoMessage { panic("index out of range") } -func (m PlaceholderMessage) Messages() pref.MessageDescriptors { return emptyMessages } -func (m PlaceholderMessage) Enums() pref.EnumDescriptors { return emptyEnums } -func (m PlaceholderMessage) Extensions() pref.ExtensionDescriptors { return emptyExtensions } -func (m PlaceholderMessage) ProtoType(pref.MessageDescriptor) { return } -func (m PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return } +func (m PlaceholderMessage) ParentFile() protoreflect.FileDescriptor { return nil } +func (m PlaceholderMessage) Parent() protoreflect.Descriptor { return nil } +func (m PlaceholderMessage) Index() int { return 0 } +func (m PlaceholderMessage) Syntax() protoreflect.Syntax { return 0 } +func (m PlaceholderMessage) Name() protoreflect.Name { return protoreflect.FullName(m).Name() } +func (m PlaceholderMessage) FullName() protoreflect.FullName { return protoreflect.FullName(m) } +func (m PlaceholderMessage) IsPlaceholder() bool { return true } +func (m PlaceholderMessage) Options() protoreflect.ProtoMessage { return descopts.Message } +func (m PlaceholderMessage) IsMapEntry() bool { return false } +func (m PlaceholderMessage) Fields() protoreflect.FieldDescriptors { return emptyFields } +func (m PlaceholderMessage) Oneofs() protoreflect.OneofDescriptors { return emptyOneofs } +func (m PlaceholderMessage) ReservedNames() protoreflect.Names { return emptyNames } +func (m PlaceholderMessage) ReservedRanges() protoreflect.FieldRanges { return emptyFieldRanges } +func (m PlaceholderMessage) RequiredNumbers() protoreflect.FieldNumbers { return emptyFieldNumbers } +func (m PlaceholderMessage) ExtensionRanges() protoreflect.FieldRanges { return emptyFieldRanges } +func (m PlaceholderMessage) ExtensionRangeOptions(int) protoreflect.ProtoMessage { + panic("index out of range") +} +func (m PlaceholderMessage) Messages() protoreflect.MessageDescriptors { return emptyMessages } +func (m PlaceholderMessage) Enums() protoreflect.EnumDescriptors { return emptyEnums } +func (m PlaceholderMessage) Extensions() protoreflect.ExtensionDescriptors { return emptyExtensions } +func (m PlaceholderMessage) ProtoType(protoreflect.MessageDescriptor) { return } +func (m 
PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go index 0a0dd35de..f0e38c4ef 100644 --- a/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -10,17 +10,16 @@ import ( "reflect" "google.golang.org/protobuf/internal/descopts" - fdesc "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/filedesc" pimpl "google.golang.org/protobuf/internal/impl" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" ) // Builder constructs type descriptors from a raw file descriptor // and associated Go types for each enum and message declaration. // -// -// Flattened Ordering +// # Flattened Ordering // // The protobuf type system represents declarations as a tree. Certain nodes in // the tree require us to either associate it with a concrete Go type or to @@ -52,7 +51,7 @@ import ( // that children themselves may have. type Builder struct { // File is the underlying file descriptor builder. - File fdesc.Builder + File filedesc.Builder // GoTypes is a unique set of the Go types for all declarations and // dependencies. Each type is represented as a zero value of the Go type. @@ -108,22 +107,22 @@ type Builder struct { // TypeRegistry is the registry to register each type descriptor. // If nil, it uses protoregistry.GlobalTypes. TypeRegistry interface { - RegisterMessage(pref.MessageType) error - RegisterEnum(pref.EnumType) error - RegisterExtension(pref.ExtensionType) error + RegisterMessage(protoreflect.MessageType) error + RegisterEnum(protoreflect.EnumType) error + RegisterExtension(protoreflect.ExtensionType) error } } // Out is the output of the builder. type Out struct { - File pref.FileDescriptor + File protoreflect.FileDescriptor } func (tb Builder) Build() (out Out) { // Replace the resolver with one that resolves dependencies by index, // which is faster and more reliable than relying on the global registry. if tb.File.FileRegistry == nil { - tb.File.FileRegistry = preg.GlobalFiles + tb.File.FileRegistry = protoregistry.GlobalFiles } tb.File.FileRegistry = &resolverByIndex{ goTypes: tb.GoTypes, @@ -133,7 +132,7 @@ func (tb Builder) Build() (out Out) { // Initialize registry if unpopulated. 
if tb.TypeRegistry == nil { - tb.TypeRegistry = preg.GlobalTypes + tb.TypeRegistry = protoregistry.GlobalTypes } fbOut := tb.File.Build() @@ -183,23 +182,23 @@ func (tb Builder) Build() (out Out) { for i := range fbOut.Messages { switch fbOut.Messages[i].Name() { case "FileOptions": - descopts.File = messageGoTypes[i].(pref.ProtoMessage) + descopts.File = messageGoTypes[i].(protoreflect.ProtoMessage) case "EnumOptions": - descopts.Enum = messageGoTypes[i].(pref.ProtoMessage) + descopts.Enum = messageGoTypes[i].(protoreflect.ProtoMessage) case "EnumValueOptions": - descopts.EnumValue = messageGoTypes[i].(pref.ProtoMessage) + descopts.EnumValue = messageGoTypes[i].(protoreflect.ProtoMessage) case "MessageOptions": - descopts.Message = messageGoTypes[i].(pref.ProtoMessage) + descopts.Message = messageGoTypes[i].(protoreflect.ProtoMessage) case "FieldOptions": - descopts.Field = messageGoTypes[i].(pref.ProtoMessage) + descopts.Field = messageGoTypes[i].(protoreflect.ProtoMessage) case "OneofOptions": - descopts.Oneof = messageGoTypes[i].(pref.ProtoMessage) + descopts.Oneof = messageGoTypes[i].(protoreflect.ProtoMessage) case "ExtensionRangeOptions": - descopts.ExtensionRange = messageGoTypes[i].(pref.ProtoMessage) + descopts.ExtensionRange = messageGoTypes[i].(protoreflect.ProtoMessage) case "ServiceOptions": - descopts.Service = messageGoTypes[i].(pref.ProtoMessage) + descopts.Service = messageGoTypes[i].(protoreflect.ProtoMessage) case "MethodOptions": - descopts.Method = messageGoTypes[i].(pref.ProtoMessage) + descopts.Method = messageGoTypes[i].(protoreflect.ProtoMessage) } } } @@ -216,11 +215,11 @@ func (tb Builder) Build() (out Out) { const listExtDeps = 2 var goType reflect.Type switch fbOut.Extensions[i].L1.Kind { - case pref.EnumKind: + case protoreflect.EnumKind: j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx) goType = reflect.TypeOf(tb.GoTypes[j]) depIdx++ - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx) goType = reflect.TypeOf(tb.GoTypes[j]) depIdx++ @@ -242,22 +241,22 @@ func (tb Builder) Build() (out Out) { return out } -var goTypeForPBKind = map[pref.Kind]reflect.Type{ - pref.BoolKind: reflect.TypeOf(bool(false)), - pref.Int32Kind: reflect.TypeOf(int32(0)), - pref.Sint32Kind: reflect.TypeOf(int32(0)), - pref.Sfixed32Kind: reflect.TypeOf(int32(0)), - pref.Int64Kind: reflect.TypeOf(int64(0)), - pref.Sint64Kind: reflect.TypeOf(int64(0)), - pref.Sfixed64Kind: reflect.TypeOf(int64(0)), - pref.Uint32Kind: reflect.TypeOf(uint32(0)), - pref.Fixed32Kind: reflect.TypeOf(uint32(0)), - pref.Uint64Kind: reflect.TypeOf(uint64(0)), - pref.Fixed64Kind: reflect.TypeOf(uint64(0)), - pref.FloatKind: reflect.TypeOf(float32(0)), - pref.DoubleKind: reflect.TypeOf(float64(0)), - pref.StringKind: reflect.TypeOf(string("")), - pref.BytesKind: reflect.TypeOf([]byte(nil)), +var goTypeForPBKind = map[protoreflect.Kind]reflect.Type{ + protoreflect.BoolKind: reflect.TypeOf(bool(false)), + protoreflect.Int32Kind: reflect.TypeOf(int32(0)), + protoreflect.Sint32Kind: reflect.TypeOf(int32(0)), + protoreflect.Sfixed32Kind: reflect.TypeOf(int32(0)), + protoreflect.Int64Kind: reflect.TypeOf(int64(0)), + protoreflect.Sint64Kind: reflect.TypeOf(int64(0)), + protoreflect.Sfixed64Kind: reflect.TypeOf(int64(0)), + protoreflect.Uint32Kind: reflect.TypeOf(uint32(0)), + protoreflect.Fixed32Kind: reflect.TypeOf(uint32(0)), + protoreflect.Uint64Kind: reflect.TypeOf(uint64(0)), + protoreflect.Fixed64Kind: 
reflect.TypeOf(uint64(0)), + protoreflect.FloatKind: reflect.TypeOf(float32(0)), + protoreflect.DoubleKind: reflect.TypeOf(float64(0)), + protoreflect.StringKind: reflect.TypeOf(string("")), + protoreflect.BytesKind: reflect.TypeOf([]byte(nil)), } type depIdxs []int32 @@ -274,13 +273,13 @@ type ( fileRegistry } fileRegistry interface { - FindFileByPath(string) (pref.FileDescriptor, error) - FindDescriptorByName(pref.FullName) (pref.Descriptor, error) - RegisterFile(pref.FileDescriptor) error + FindFileByPath(string) (protoreflect.FileDescriptor, error) + FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error) + RegisterFile(protoreflect.FileDescriptor) error } ) -func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.EnumDescriptor { +func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []filedesc.Enum, ms []filedesc.Message) protoreflect.EnumDescriptor { if depIdx := int(r.depIdxs.Get(i, j)); int(depIdx) < len(es)+len(ms) { return &es[depIdx] } else { @@ -288,7 +287,7 @@ func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []fdesc.Enum, ms []fdes } } -func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.MessageDescriptor { +func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []filedesc.Enum, ms []filedesc.Message) protoreflect.MessageDescriptor { if depIdx := int(r.depIdxs.Get(i, j)); depIdx < len(es)+len(ms) { return &ms[depIdx-len(es)] } else { diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go index a72995f02..bda8e8cf3 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !protolegacy // +build !protolegacy package flags diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go index 772e2f0e4..6d8d9bd6b 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build protolegacy // +build protolegacy package flags diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go index abee5f30e..a371f98de 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -12,8 +12,8 @@ import ( "google.golang.org/protobuf/encoding/prototext" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // Export is a zero-length named type that exists only to export a set of @@ -32,11 +32,11 @@ type enum = interface{} // EnumOf returns the protoreflect.Enum interface over e. // It returns nil if e is nil. 
-func (Export) EnumOf(e enum) pref.Enum { +func (Export) EnumOf(e enum) protoreflect.Enum { switch e := e.(type) { case nil: return nil - case pref.Enum: + case protoreflect.Enum: return e default: return legacyWrapEnum(reflect.ValueOf(e)) @@ -45,11 +45,11 @@ func (Export) EnumOf(e enum) pref.Enum { // EnumDescriptorOf returns the protoreflect.EnumDescriptor for e. // It returns nil if e is nil. -func (Export) EnumDescriptorOf(e enum) pref.EnumDescriptor { +func (Export) EnumDescriptorOf(e enum) protoreflect.EnumDescriptor { switch e := e.(type) { case nil: return nil - case pref.Enum: + case protoreflect.Enum: return e.Descriptor() default: return LegacyLoadEnumDesc(reflect.TypeOf(e)) @@ -58,11 +58,11 @@ func (Export) EnumDescriptorOf(e enum) pref.EnumDescriptor { // EnumTypeOf returns the protoreflect.EnumType for e. // It returns nil if e is nil. -func (Export) EnumTypeOf(e enum) pref.EnumType { +func (Export) EnumTypeOf(e enum) protoreflect.EnumType { switch e := e.(type) { case nil: return nil - case pref.Enum: + case protoreflect.Enum: return e.Type() default: return legacyLoadEnumType(reflect.TypeOf(e)) @@ -71,7 +71,7 @@ func (Export) EnumTypeOf(e enum) pref.EnumType { // EnumStringOf returns the enum value as a string, either as the name if // the number is resolvable, or the number formatted as a string. -func (Export) EnumStringOf(ed pref.EnumDescriptor, n pref.EnumNumber) string { +func (Export) EnumStringOf(ed protoreflect.EnumDescriptor, n protoreflect.EnumNumber) string { ev := ed.Values().ByNumber(n) if ev != nil { return string(ev.Name()) @@ -84,7 +84,7 @@ func (Export) EnumStringOf(ed pref.EnumDescriptor, n pref.EnumNumber) string { type message = interface{} // legacyMessageWrapper wraps a v2 message as a v1 message. -type legacyMessageWrapper struct{ m pref.ProtoMessage } +type legacyMessageWrapper struct{ m protoreflect.ProtoMessage } func (m legacyMessageWrapper) Reset() { proto.Reset(m.m) } func (m legacyMessageWrapper) String() string { return Export{}.MessageStringOf(m.m) } @@ -92,30 +92,30 @@ func (m legacyMessageWrapper) ProtoMessage() {} // ProtoMessageV1Of converts either a v1 or v2 message to a v1 message. // It returns nil if m is nil. -func (Export) ProtoMessageV1Of(m message) piface.MessageV1 { +func (Export) ProtoMessageV1Of(m message) protoiface.MessageV1 { switch mv := m.(type) { case nil: return nil - case piface.MessageV1: + case protoiface.MessageV1: return mv case unwrapper: return Export{}.ProtoMessageV1Of(mv.protoUnwrap()) - case pref.ProtoMessage: + case protoreflect.ProtoMessage: return legacyMessageWrapper{mv} default: panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) } } -func (Export) protoMessageV2Of(m message) pref.ProtoMessage { +func (Export) protoMessageV2Of(m message) protoreflect.ProtoMessage { switch mv := m.(type) { case nil: return nil - case pref.ProtoMessage: + case protoreflect.ProtoMessage: return mv case legacyMessageWrapper: return mv.m - case piface.MessageV1: + case protoiface.MessageV1: return nil default: panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) @@ -124,7 +124,7 @@ func (Export) protoMessageV2Of(m message) pref.ProtoMessage { // ProtoMessageV2Of converts either a v1 or v2 message to a v2 message. // It returns nil if m is nil. 
-func (Export) ProtoMessageV2Of(m message) pref.ProtoMessage { +func (Export) ProtoMessageV2Of(m message) protoreflect.ProtoMessage { if m == nil { return nil } @@ -136,7 +136,7 @@ func (Export) ProtoMessageV2Of(m message) pref.ProtoMessage { // MessageOf returns the protoreflect.Message interface over m. // It returns nil if m is nil. -func (Export) MessageOf(m message) pref.Message { +func (Export) MessageOf(m message) protoreflect.Message { if m == nil { return nil } @@ -148,7 +148,7 @@ func (Export) MessageOf(m message) pref.Message { // MessageDescriptorOf returns the protoreflect.MessageDescriptor for m. // It returns nil if m is nil. -func (Export) MessageDescriptorOf(m message) pref.MessageDescriptor { +func (Export) MessageDescriptorOf(m message) protoreflect.MessageDescriptor { if m == nil { return nil } @@ -160,7 +160,7 @@ func (Export) MessageDescriptorOf(m message) pref.MessageDescriptor { // MessageTypeOf returns the protoreflect.MessageType for m. // It returns nil if m is nil. -func (Export) MessageTypeOf(m message) pref.MessageType { +func (Export) MessageTypeOf(m message) protoreflect.MessageType { if m == nil { return nil } @@ -172,6 +172,6 @@ func (Export) MessageTypeOf(m message) pref.MessageType { // MessageStringOf returns the message value as a string, // which is the message serialized in the protobuf text format. -func (Export) MessageStringOf(m pref.ProtoMessage) string { +func (Export) MessageStringOf(m protoreflect.ProtoMessage) string { return prototext.MarshalOptions{Multiline: false}.Format(m) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go index b82341e57..bff041edc 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -8,18 +8,18 @@ import ( "sync" "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) -func (mi *MessageInfo) checkInitialized(in piface.CheckInitializedInput) (piface.CheckInitializedOutput, error) { +func (mi *MessageInfo) checkInitialized(in protoiface.CheckInitializedInput) (protoiface.CheckInitializedOutput, error) { var p pointer if ms, ok := in.Message.(*messageState); ok { p = ms.pointer() } else { p = in.Message.(*messageReflectWrapper).pointer() } - return piface.CheckInitializedOutput{}, mi.checkInitializedPointer(p) + return protoiface.CheckInitializedOutput{}, mi.checkInitializedPointer(p) } func (mi *MessageInfo) checkInitializedPointer(p pointer) error { @@ -90,7 +90,7 @@ var ( // needsInitCheck reports whether a message needs to be checked for partial initialization. // // It returns true if the message transitively includes any required or extension fields. -func needsInitCheck(md pref.MessageDescriptor) bool { +func needsInitCheck(md protoreflect.MessageDescriptor) bool { if v, ok := needsInitCheckMap.Load(md); ok { if has, ok := v.(bool); ok { return has @@ -101,7 +101,7 @@ func needsInitCheck(md pref.MessageDescriptor) bool { return needsInitCheckLocked(md) } -func needsInitCheckLocked(md pref.MessageDescriptor) (has bool) { +func needsInitCheckLocked(md protoreflect.MessageDescriptor) (has bool) { if v, ok := needsInitCheckMap.Load(md); ok { // If has is true, we've previously determined that this message // needs init checks. 
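Every hunk in this series applies the same mechanical change: the vendored protobuf packages drop their short import aliases (pref, piface, preg, fdesc) in favor of the packages' own names, so call sites read protoreflect.Kind instead of pref.Kind. A sketch of the resulting style against the public protoreflect package (illustrative only, not part of the vendored code):

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

// kindOf is written in the post-rename style: the unaliased package name
// protoreflect appears at every use site instead of the old pref alias.
func kindOf(fd protoreflect.FieldDescriptor) protoreflect.Kind {
	return fd.Kind()
}

func main() {
	// Kind values stringify to their .proto names.
	fmt.Println(protoreflect.StringKind, protoreflect.MessageKind) // string message
}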
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 08d35170b..e74cefdc5 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type extensionFieldInfo struct { @@ -23,7 +23,7 @@ type extensionFieldInfo struct { var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo -func getExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { +func getExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { if xi, ok := xt.(*ExtensionInfo); ok { xi.lazyInit() return xi.info @@ -32,7 +32,7 @@ func getExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { } // legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt. -func legacyLoadExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { +func legacyLoadExtensionFieldInfo(xt protoreflect.ExtensionType) *extensionFieldInfo { if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok { return xi.(*extensionFieldInfo) } @@ -43,7 +43,7 @@ func legacyLoadExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { return e } -func makeExtensionFieldInfo(xd pref.ExtensionDescriptor) *extensionFieldInfo { +func makeExtensionFieldInfo(xd protoreflect.ExtensionDescriptor) *extensionFieldInfo { var wiretag uint64 if !xd.IsPacked() { wiretag = protowire.EncodeTag(xd.Number(), wireTypes[xd.Kind()]) @@ -59,10 +59,10 @@ func makeExtensionFieldInfo(xd pref.ExtensionDescriptor) *extensionFieldInfo { // This is true for composite types, where we pass in a message, list, or map to fill in, // and for enums, where we pass in a prototype value to specify the concrete enum type. switch xd.Kind() { - case pref.MessageKind, pref.GroupKind, pref.EnumKind: + case protoreflect.MessageKind, protoreflect.GroupKind, protoreflect.EnumKind: e.unmarshalNeedsValue = true default: - if xd.Cardinality() == pref.Repeated { + if xd.Cardinality() == protoreflect.Repeated { e.unmarshalNeedsValue = true } } @@ -73,21 +73,21 @@ type lazyExtensionValue struct { atomicOnce uint32 // atomically set if value is valid mu sync.Mutex xi *extensionFieldInfo - value pref.Value + value protoreflect.Value b []byte - fn func() pref.Value + fn func() protoreflect.Value } type ExtensionField struct { - typ pref.ExtensionType + typ protoreflect.ExtensionType // value is either the value of GetValue, // or a *lazyExtensionValue that then returns the value of GetValue. - value pref.Value + value protoreflect.Value lazy *lazyExtensionValue } -func (f *ExtensionField) appendLazyBytes(xt pref.ExtensionType, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, b []byte) { +func (f *ExtensionField) appendLazyBytes(xt protoreflect.ExtensionType, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, b []byte) { if f.lazy == nil { f.lazy = &lazyExtensionValue{xi: xi} } @@ -97,7 +97,7 @@ func (f *ExtensionField) appendLazyBytes(xt pref.ExtensionType, xi *extensionFie f.lazy.b = append(f.lazy.b, b...) 
} -func (f *ExtensionField) canLazy(xt pref.ExtensionType) bool { +func (f *ExtensionField) canLazy(xt protoreflect.ExtensionType) bool { if f.typ == nil { return true } @@ -154,7 +154,7 @@ func (f *ExtensionField) lazyInit() { // Set sets the type and value of the extension field. // This must not be called concurrently. -func (f *ExtensionField) Set(t pref.ExtensionType, v pref.Value) { +func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value) { f.typ = t f.value = v f.lazy = nil @@ -162,14 +162,14 @@ func (f *ExtensionField) Set(t pref.ExtensionType, v pref.Value) { // SetLazy sets the type and a value that is to be lazily evaluated upon first use. // This must not be called concurrently. -func (f *ExtensionField) SetLazy(t pref.ExtensionType, fn func() pref.Value) { +func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) { f.typ = t f.lazy = &lazyExtensionValue{fn: fn} } // Value returns the value of the extension field. // This may be called concurrently. -func (f *ExtensionField) Value() pref.Value { +func (f *ExtensionField) Value() protoreflect.Value { if f.lazy != nil { if atomic.LoadUint32(&f.lazy.atomicOnce) == 0 { f.lazyInit() @@ -181,7 +181,7 @@ func (f *ExtensionField) Value() pref.Value { // Type returns the type of the extension field. // This may be called concurrently. -func (f ExtensionField) Type() pref.ExtensionType { +func (f ExtensionField) Type() protoreflect.ExtensionType { return f.typ } @@ -193,7 +193,7 @@ func (f ExtensionField) IsSet() bool { // IsLazy reports whether a field is lazily encoded. // It is exported for testing. -func IsLazy(m pref.Message, fd pref.FieldDescriptor) bool { +func IsLazy(m protoreflect.Message, fd protoreflect.FieldDescriptor) bool { var mi *MessageInfo var p pointer switch m := m.(type) { @@ -206,7 +206,7 @@ func IsLazy(m pref.Message, fd pref.FieldDescriptor) bool { default: return false } - xd, ok := fd.(pref.ExtensionTypeDescriptor) + xd, ok := fd.(protoreflect.ExtensionTypeDescriptor) if !ok { return false } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index cb4b482d1..3fadd241e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -12,9 +12,9 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" ) type errInvalidUTF8 struct{} @@ -30,7 +30,7 @@ func (errInvalidUTF8) Unwrap() error { return errors.Error } // to the appropriate field-specific function as necessary. // // The unmarshal function is set on each field individually as usual. 
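The ExtensionField plumbing above defers decoding through lazyExtensionValue, whose atomicOnce/mu pair is classic double-checked lazy initialization: an atomic load on the fast path, a mutex plus re-check on the one-time slow path. A minimal standalone sketch of the pattern with hypothetical names:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// lazyValue computes its value at most once, on first Get.
type lazyValue struct {
	atomicOnce uint32 // atomically set to 1 once v is valid
	mu         sync.Mutex
	fn         func() int // deferred computation
	v          int
}

func (l *lazyValue) Get() int {
	if atomic.LoadUint32(&l.atomicOnce) == 0 { // fast path: no lock taken
		l.mu.Lock()
		if atomic.LoadUint32(&l.atomicOnce) == 0 { // re-check under the lock
			l.v = l.fn()
			atomic.StoreUint32(&l.atomicOnce, 1)
		}
		l.mu.Unlock()
	}
	return l.v
}

func main() {
	l := &lazyValue{fn: func() int { return 42 }}
	fmt.Println(l.Get(), l.Get()) // fn runs once; prints 42 42
}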
-func (mi *MessageInfo) initOneofFieldCoders(od pref.OneofDescriptor, si structInfo) { +func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si structInfo) { fs := si.oneofsByName[od.Name()] ft := fs.Type oneofFields := make(map[reflect.Type]*coderFieldInfo) @@ -118,13 +118,13 @@ func (mi *MessageInfo) initOneofFieldCoders(od pref.OneofDescriptor, si structIn } } -func makeWeakMessageFieldCoder(fd pref.FieldDescriptor) pointerCoderFuncs { +func makeWeakMessageFieldCoder(fd protoreflect.FieldDescriptor) pointerCoderFuncs { var once sync.Once - var messageType pref.MessageType + var messageType protoreflect.MessageType lazyInit := func() { once.Do(func() { messageName := fd.Message().FullName() - messageType, _ = preg.GlobalTypes.FindMessageByName(messageName) + messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) }) } @@ -190,7 +190,7 @@ func makeWeakMessageFieldCoder(fd pref.FieldDescriptor) pointerCoderFuncs { } } -func makeMessageFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { +func makeMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ size: sizeMessageInfo, @@ -280,7 +280,7 @@ func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarsh if n < 0 { return out, errDecode } - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: v, Message: m.ProtoReflect(), }) @@ -288,27 +288,27 @@ func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarsh return out, err } out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return out, nil } -func sizeMessageValue(v pref.Value, tagsize int, opts marshalOptions) int { +func sizeMessageValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { m := v.Message().Interface() return sizeMessage(m, tagsize, opts) } -func appendMessageValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { +func appendMessageValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { m := v.Message().Interface() return appendMessage(b, m, wiretag, opts) } -func consumeMessageValue(b []byte, v pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) { +func consumeMessageValue(b []byte, v protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (protoreflect.Value, unmarshalOutput, error) { m := v.Message().Interface() out, err := consumeMessage(b, m, wtyp, opts) return v, out, err } -func isInitMessageValue(v pref.Value) error { +func isInitMessageValue(v protoreflect.Value) error { m := v.Message().Interface() return proto.CheckInitialized(m) } @@ -321,17 +321,17 @@ var coderMessageValue = valueCoderFuncs{ merge: mergeMessageValue, } -func sizeGroupValue(v pref.Value, tagsize int, opts marshalOptions) int { +func sizeGroupValue(v protoreflect.Value, tagsize int, opts marshalOptions) int { m := v.Message().Interface() return sizeGroup(m, tagsize, opts) } -func appendGroupValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { +func appendGroupValue(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { m := v.Message().Interface() return appendGroup(b, m, wiretag, opts) } -func 
consumeGroupValue(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) { +func consumeGroupValue(b []byte, v protoreflect.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (protoreflect.Value, unmarshalOutput, error) { m := v.Message().Interface() out, err := consumeGroup(b, m, num, wtyp, opts) return v, out, err @@ -345,7 +345,7 @@ var coderGroupValue = valueCoderFuncs{ merge: mergeMessageValue, } -func makeGroupFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { +func makeGroupFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { num := fd.Number() if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ @@ -424,7 +424,7 @@ func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowir if n < 0 { return out, errDecode } - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: b, Message: m.ProtoReflect(), }) @@ -432,11 +432,11 @@ func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowir return out, err } out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return out, nil } -func makeMessageSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { +func makeMessageSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ size: sizeMessageSliceInfo, @@ -555,7 +555,7 @@ func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowir return out, errDecode } mp := reflect.New(goType.Elem()) - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: v, Message: asMessage(mp).ProtoReflect(), }) @@ -564,7 +564,7 @@ func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowir } p.AppendPointerSlice(pointerOfValue(mp)) out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return out, nil } @@ -581,7 +581,7 @@ func isInitMessageSlice(p pointer, goType reflect.Type) error { // Slices of messages -func sizeMessageSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int { +func sizeMessageSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { @@ -591,7 +591,7 @@ func sizeMessageSliceValue(listv pref.Value, tagsize int, opts marshalOptions) i return n } -func appendMessageSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { +func appendMessageSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() mopts := opts.Options() for i, llen := 0, list.Len(); i < llen; i++ { @@ -608,30 +608,30 @@ func appendMessageSliceValue(b []byte, listv pref.Value, wiretag uint64, opts ma return b, nil } -func consumeMessageSliceValue(b []byte, listv pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) { +func consumeMessageSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, 
err error) { list := listv.List() if wtyp != protowire.BytesType { - return pref.Value{}, out, errUnknown + return protoreflect.Value{}, out, errUnknown } v, n := protowire.ConsumeBytes(b) if n < 0 { - return pref.Value{}, out, errDecode + return protoreflect.Value{}, out, errDecode } m := list.NewElement() - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: v, Message: m.Message(), }) if err != nil { - return pref.Value{}, out, err + return protoreflect.Value{}, out, err } list.Append(m) out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return listv, out, nil } -func isInitMessageSliceValue(listv pref.Value) error { +func isInitMessageSliceValue(listv protoreflect.Value) error { list := listv.List() for i, llen := 0, list.Len(); i < llen; i++ { m := list.Get(i).Message().Interface() @@ -650,7 +650,7 @@ var coderMessageSliceValue = valueCoderFuncs{ merge: mergeMessageListValue, } -func sizeGroupSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int { +func sizeGroupSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int { list := listv.List() n := 0 for i, llen := 0, list.Len(); i < llen; i++ { @@ -660,7 +660,7 @@ func sizeGroupSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int return n } -func appendGroupSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { +func appendGroupSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { list := listv.List() mopts := opts.Options() for i, llen := 0, list.Len(); i < llen; i++ { @@ -676,26 +676,26 @@ func appendGroupSliceValue(b []byte, listv pref.Value, wiretag uint64, opts mars return b, nil } -func consumeGroupSliceValue(b []byte, listv pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) { +func consumeGroupSliceValue(b []byte, listv protoreflect.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { list := listv.List() if wtyp != protowire.StartGroupType { - return pref.Value{}, out, errUnknown + return protoreflect.Value{}, out, errUnknown } b, n := protowire.ConsumeGroup(num, b) if n < 0 { - return pref.Value{}, out, errDecode + return protoreflect.Value{}, out, errDecode } m := list.NewElement() - o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: b, Message: m.Message(), }) if err != nil { - return pref.Value{}, out, err + return protoreflect.Value{}, out, err } list.Append(m) out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return listv, out, nil } @@ -707,7 +707,7 @@ var coderGroupSliceValue = valueCoderFuncs{ merge: mergeMessageListValue, } -func makeGroupSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { +func makeGroupSliceFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { num := fd.Number() if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ @@ -772,7 +772,7 @@ func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire return out, errDecode } mp := reflect.New(goType.Elem()) - o, err := 
opts.Options().UnmarshalState(piface.UnmarshalInput{ + o, err := opts.Options().UnmarshalState(protoiface.UnmarshalInput{ Buf: b, Message: asMessage(mp).ProtoReflect(), }) @@ -781,7 +781,7 @@ func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire } p.AppendPointerSlice(pointerOfValue(mp)) out.n = n - out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + out.initialized = o.Flags&protoiface.UnmarshalInitialized != 0 return out, nil } @@ -822,8 +822,8 @@ func consumeGroupSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFie return out, nil } -func asMessage(v reflect.Value) pref.ProtoMessage { - if m, ok := v.Interface().(pref.ProtoMessage); ok { +func asMessage(v reflect.Value) protoreflect.ProtoMessage { + if m, ok := v.Interface().(protoreflect.ProtoMessage); ok { return m } return legacyWrapMessage(v).Interface() diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index c1245fef4..111b9d16f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/genid" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type mapInfo struct { @@ -19,12 +19,12 @@ type mapInfo struct { valWiretag uint64 keyFuncs valueCoderFuncs valFuncs valueCoderFuncs - keyZero pref.Value - keyKind pref.Kind + keyZero protoreflect.Value + keyKind protoreflect.Kind conv *mapConverter } -func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage *MessageInfo, funcs pointerCoderFuncs) { +func encoderFuncsForMap(fd protoreflect.FieldDescriptor, ft reflect.Type) (valueMessage *MessageInfo, funcs pointerCoderFuncs) { // TODO: Consider generating specialized map coders. 
keyField := fd.MapKey() valField := fd.MapValue() @@ -44,7 +44,7 @@ func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage keyKind: keyField.Kind(), conv: conv, } - if valField.Kind() == pref.MessageKind { + if valField.Kind() == protoreflect.MessageKind { valueMessage = getMessageInfo(ft.Elem()) } @@ -68,9 +68,9 @@ func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage }, } switch valField.Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: funcs.merge = mergeMapOfMessage - case pref.BytesKind: + case protoreflect.BytesKind: funcs.merge = mergeMapOfBytes default: funcs.merge = mergeMap @@ -135,7 +135,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo err := errUnknown switch num { case genid.MapEntry_Key_field_number: - var v pref.Value + var v protoreflect.Value var o unmarshalOutput v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) if err != nil { @@ -144,7 +144,7 @@ func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo key = v n = o.n case genid.MapEntry_Value_field_number: - var v pref.Value + var v protoreflect.Value var o unmarshalOutput v, o, err = mapi.valFuncs.unmarshal(b, val, num, wtyp, opts) if err != nil { @@ -192,7 +192,7 @@ func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi err := errUnknown switch num { case 1: - var v pref.Value + var v protoreflect.Value var o unmarshalOutput v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) if err != nil { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go index 2706bb67f..4b15493f2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.12 // +build !go1.12 package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go index 1533ef600..0b31b66ea 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.12 // +build go1.12 package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index cd40527ff..6b2fdbb73 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -12,15 +12,15 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/order" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // coderMessageInfo contains per-message information used by the fast-path functions. // This is a different type from MessageInfo to keep MessageInfo as general-purpose as // possible. 
type coderMessageInfo struct { - methods piface.Methods + methods protoiface.Methods orderedCoderFields []*coderFieldInfo denseCoderFields []*coderFieldInfo @@ -38,13 +38,13 @@ type coderFieldInfo struct { funcs pointerCoderFuncs // fast-path per-field functions mi *MessageInfo // field's message ft reflect.Type - validation validationInfo // information used by message validation - num pref.FieldNumber // field number - offset offset // struct field offset - wiretag uint64 // field tag (number + wire type) - tagsize int // size of the varint-encoded tag - isPointer bool // true if IsNil may be called on the struct field - isRequired bool // true if field is required + validation validationInfo // information used by message validation + num protoreflect.FieldNumber // field number + offset offset // struct field offset + wiretag uint64 // field tag (number + wire type) + tagsize int // size of the varint-encoded tag + isPointer bool // true if IsNil may be called on the struct field + isRequired bool // true if field is required } func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { @@ -125,8 +125,8 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { funcs: funcs, mi: childMessage, validation: newFieldValidationInfo(mi, si, fd, ft), - isPointer: fd.Cardinality() == pref.Repeated || fd.HasPresence(), - isRequired: fd.Cardinality() == pref.Required, + isPointer: fd.Cardinality() == protoreflect.Repeated || fd.HasPresence(), + isRequired: fd.Cardinality() == protoreflect.Required, } mi.orderedCoderFields = append(mi.orderedCoderFields, cf) mi.coderFields[cf.num] = cf @@ -149,7 +149,7 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num }) - var maxDense pref.FieldNumber + var maxDense protoreflect.FieldNumber for _, cf := range mi.orderedCoderFields { if cf.num >= 16 && cf.num >= 2*maxDense { break @@ -175,12 +175,12 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { mi.needsInitCheck = needsInitCheck(mi.Desc) if mi.methods.Marshal == nil && mi.methods.Size == nil { - mi.methods.Flags |= piface.SupportMarshalDeterministic + mi.methods.Flags |= protoiface.SupportMarshalDeterministic mi.methods.Marshal = mi.marshal mi.methods.Size = mi.size } if mi.methods.Unmarshal == nil { - mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown + mi.methods.Flags |= protoiface.SupportUnmarshalDiscardUnknown mi.methods.Unmarshal = mi.unmarshal } if mi.methods.CheckInitialized == nil { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go index 90705e3ae..145c577bd 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
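The hunk just below, like the matching ones in the flags and codec_map files earlier in this series, adds the Go 1.17 //go:build constraint above the legacy // +build comment. During the transition both lines must be present and agree; || in the new syntax corresponds to a space-separated OR in the old one, and gofmt keeps the pair in sync. A hypothetical file showing the convention:

//go:build purego || appengine
// +build purego appengine

// Package impl (hypothetical copy): this file is compiled only when the
// purego or appengine build tag is set.
package impl

// reflectionOnly marks builds that must avoid package unsafe.
const reflectionOnly = true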
+//go:build purego || appengine // +build purego appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go index e89971238..576dcf3aa 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go @@ -10,7 +10,7 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // pointerCoderFuncs is a set of pointer encoding functions. @@ -25,83 +25,83 @@ type pointerCoderFuncs struct { // valueCoderFuncs is a set of protoreflect.Value encoding functions. type valueCoderFuncs struct { - size func(v pref.Value, tagsize int, opts marshalOptions) int - marshal func(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) - unmarshal func(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) - isInit func(v pref.Value) error - merge func(dst, src pref.Value, opts mergeOptions) pref.Value + size func(v protoreflect.Value, tagsize int, opts marshalOptions) int + marshal func(b []byte, v protoreflect.Value, wiretag uint64, opts marshalOptions) ([]byte, error) + unmarshal func(b []byte, v protoreflect.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (protoreflect.Value, unmarshalOutput, error) + isInit func(v protoreflect.Value) error + merge func(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value } // fieldCoder returns pointer functions for a field, used for operating on // struct fields. -func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { +func fieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { switch { case fd.IsMap(): return encoderFuncsForMap(fd, ft) - case fd.Cardinality() == pref.Repeated && !fd.IsPacked(): + case fd.Cardinality() == protoreflect.Repeated && !fd.IsPacked(): // Repeated fields (not packed). 
if ft.Kind() != reflect.Slice { break } ft := ft.Elem() switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBoolSlice } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnumSlice } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32Slice } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32Slice } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32Slice } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64Slice } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64Slice } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64Slice } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32Slice } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32Slice } - case pref.FloatKind: + case protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloatSlice } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64Slice } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64Slice } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDoubleSlice } - case pref.StringKind: + case protoreflect.StringKind: if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { return nil, coderStringSliceValidateUTF8 } @@ -114,19 +114,19 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { return nil, coderBytesSlice } - case pref.BytesKind: + case protoreflect.BytesKind: if ft.Kind() == reflect.String { return nil, coderStringSlice } if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { return nil, coderBytesSlice } - case pref.MessageKind: + case protoreflect.MessageKind: return getMessageInfo(ft), makeMessageSliceFieldCoder(fd, ft) - case pref.GroupKind: + case protoreflect.GroupKind: return getMessageInfo(ft), makeGroupSliceFieldCoder(fd, ft) } - case fd.Cardinality() == pref.Repeated && fd.IsPacked(): + case fd.Cardinality() == protoreflect.Repeated && fd.IsPacked(): // Packed repeated fields. 
// // Only repeated fields of primitive numeric types @@ -136,128 +136,128 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer } ft := ft.Elem() switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBoolPackedSlice } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnumPackedSlice } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32PackedSlice } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32PackedSlice } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32PackedSlice } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64PackedSlice } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64PackedSlice } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64PackedSlice } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32PackedSlice } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32PackedSlice } - case pref.FloatKind: + case protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloatPackedSlice } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64PackedSlice } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64PackedSlice } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDoublePackedSlice } } - case fd.Kind() == pref.MessageKind: + case fd.Kind() == protoreflect.MessageKind: return getMessageInfo(ft), makeMessageFieldCoder(fd, ft) - case fd.Kind() == pref.GroupKind: + case fd.Kind() == protoreflect.GroupKind: return getMessageInfo(ft), makeGroupFieldCoder(fd, ft) - case fd.Syntax() == pref.Proto3 && fd.ContainingOneof() == nil: + case fd.Syntax() == protoreflect.Proto3 && fd.ContainingOneof() == nil: // Populated oneof fields always encode even if set to the zero value, // which normally are not encoded in proto3. 
switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBoolNoZero } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnumNoZero } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32NoZero } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32NoZero } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32NoZero } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64NoZero } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64NoZero } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64NoZero } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32NoZero } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32NoZero } - case pref.FloatKind: + case protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloatNoZero } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64NoZero } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64NoZero } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDoubleNoZero } - case pref.StringKind: + case protoreflect.StringKind: if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { return nil, coderStringNoZeroValidateUTF8 } @@ -270,7 +270,7 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { return nil, coderBytesNoZero } - case pref.BytesKind: + case protoreflect.BytesKind: if ft.Kind() == reflect.String { return nil, coderStringNoZero } @@ -281,133 +281,133 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer case ft.Kind() == reflect.Ptr: ft := ft.Elem() switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBoolPtr } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnumPtr } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32Ptr } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32Ptr } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32Ptr } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64Ptr } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64Ptr } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64Ptr } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32Ptr } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32Ptr } - case pref.FloatKind: + case 
protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloatPtr } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64Ptr } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64Ptr } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDoublePtr } - case pref.StringKind: + case protoreflect.StringKind: if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { return nil, coderStringPtrValidateUTF8 } if ft.Kind() == reflect.String { return nil, coderStringPtr } - case pref.BytesKind: + case protoreflect.BytesKind: if ft.Kind() == reflect.String { return nil, coderStringPtr } } default: switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if ft.Kind() == reflect.Bool { return nil, coderBool } - case pref.EnumKind: + case protoreflect.EnumKind: if ft.Kind() == reflect.Int32 { return nil, coderEnum } - case pref.Int32Kind: + case protoreflect.Int32Kind: if ft.Kind() == reflect.Int32 { return nil, coderInt32 } - case pref.Sint32Kind: + case protoreflect.Sint32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSint32 } - case pref.Uint32Kind: + case protoreflect.Uint32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderUint32 } - case pref.Int64Kind: + case protoreflect.Int64Kind: if ft.Kind() == reflect.Int64 { return nil, coderInt64 } - case pref.Sint64Kind: + case protoreflect.Sint64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSint64 } - case pref.Uint64Kind: + case protoreflect.Uint64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderUint64 } - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: if ft.Kind() == reflect.Int32 { return nil, coderSfixed32 } - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: if ft.Kind() == reflect.Uint32 { return nil, coderFixed32 } - case pref.FloatKind: + case protoreflect.FloatKind: if ft.Kind() == reflect.Float32 { return nil, coderFloat } - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: if ft.Kind() == reflect.Int64 { return nil, coderSfixed64 } - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: if ft.Kind() == reflect.Uint64 { return nil, coderFixed64 } - case pref.DoubleKind: + case protoreflect.DoubleKind: if ft.Kind() == reflect.Float64 { return nil, coderDouble } - case pref.StringKind: + case protoreflect.StringKind: if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { return nil, coderStringValidateUTF8 } @@ -420,7 +420,7 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { return nil, coderBytes } - case pref.BytesKind: + case protoreflect.BytesKind: if ft.Kind() == reflect.String { return nil, coderString } @@ -434,122 +434,122 @@ func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointer // encoderFuncsForValue returns value functions for a field, used for // extension values and map encoding. 
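encoderFuncsForValue, whose hunks follow, is the protoreflect.Value-based counterpart of the table above: extension fields and map entries are not backed by ordinary generated struct fields, so their coders take boxed values rather than pointers. A sketch of what one such value coder looks like (appendBoolValue is an illustrative stand-in; a real coder entry also handles sizing, wire tags, and decoding, omitted here):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/reflect/protoreflect"
)

// appendBoolValue mimics the shape of an entry like coderBoolValue:
// it receives a boxed protoreflect.Value instead of a raw pointer
// into a generated struct.
func appendBoolValue(b []byte, v protoreflect.Value) []byte {
	return protowire.AppendVarint(b, protowire.EncodeBool(v.Bool()))
}

func main() {
	fmt.Printf("% x\n", appendBoolValue(nil, protoreflect.ValueOfBool(true))) // 01
}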
-func encoderFuncsForValue(fd pref.FieldDescriptor) valueCoderFuncs { +func encoderFuncsForValue(fd protoreflect.FieldDescriptor) valueCoderFuncs { switch { - case fd.Cardinality() == pref.Repeated && !fd.IsPacked(): + case fd.Cardinality() == protoreflect.Repeated && !fd.IsPacked(): switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: return coderBoolSliceValue - case pref.EnumKind: + case protoreflect.EnumKind: return coderEnumSliceValue - case pref.Int32Kind: + case protoreflect.Int32Kind: return coderInt32SliceValue - case pref.Sint32Kind: + case protoreflect.Sint32Kind: return coderSint32SliceValue - case pref.Uint32Kind: + case protoreflect.Uint32Kind: return coderUint32SliceValue - case pref.Int64Kind: + case protoreflect.Int64Kind: return coderInt64SliceValue - case pref.Sint64Kind: + case protoreflect.Sint64Kind: return coderSint64SliceValue - case pref.Uint64Kind: + case protoreflect.Uint64Kind: return coderUint64SliceValue - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: return coderSfixed32SliceValue - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: return coderFixed32SliceValue - case pref.FloatKind: + case protoreflect.FloatKind: return coderFloatSliceValue - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: return coderSfixed64SliceValue - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: return coderFixed64SliceValue - case pref.DoubleKind: + case protoreflect.DoubleKind: return coderDoubleSliceValue - case pref.StringKind: + case protoreflect.StringKind: // We don't have a UTF-8 validating coder for repeated string fields. // Value coders are used for extensions and maps. // Extensions are never proto3, and maps never contain lists. return coderStringSliceValue - case pref.BytesKind: + case protoreflect.BytesKind: return coderBytesSliceValue - case pref.MessageKind: + case protoreflect.MessageKind: return coderMessageSliceValue - case pref.GroupKind: + case protoreflect.GroupKind: return coderGroupSliceValue } - case fd.Cardinality() == pref.Repeated && fd.IsPacked(): + case fd.Cardinality() == protoreflect.Repeated && fd.IsPacked(): switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: return coderBoolPackedSliceValue - case pref.EnumKind: + case protoreflect.EnumKind: return coderEnumPackedSliceValue - case pref.Int32Kind: + case protoreflect.Int32Kind: return coderInt32PackedSliceValue - case pref.Sint32Kind: + case protoreflect.Sint32Kind: return coderSint32PackedSliceValue - case pref.Uint32Kind: + case protoreflect.Uint32Kind: return coderUint32PackedSliceValue - case pref.Int64Kind: + case protoreflect.Int64Kind: return coderInt64PackedSliceValue - case pref.Sint64Kind: + case protoreflect.Sint64Kind: return coderSint64PackedSliceValue - case pref.Uint64Kind: + case protoreflect.Uint64Kind: return coderUint64PackedSliceValue - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: return coderSfixed32PackedSliceValue - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: return coderFixed32PackedSliceValue - case pref.FloatKind: + case protoreflect.FloatKind: return coderFloatPackedSliceValue - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: return coderSfixed64PackedSliceValue - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: return coderFixed64PackedSliceValue - case pref.DoubleKind: + case protoreflect.DoubleKind: return coderDoublePackedSliceValue } default: switch fd.Kind() { default: - case pref.BoolKind: + case protoreflect.BoolKind: return coderBoolValue - case 
pref.EnumKind: + case protoreflect.EnumKind: return coderEnumValue - case pref.Int32Kind: + case protoreflect.Int32Kind: return coderInt32Value - case pref.Sint32Kind: + case protoreflect.Sint32Kind: return coderSint32Value - case pref.Uint32Kind: + case protoreflect.Uint32Kind: return coderUint32Value - case pref.Int64Kind: + case protoreflect.Int64Kind: return coderInt64Value - case pref.Sint64Kind: + case protoreflect.Sint64Kind: return coderSint64Value - case pref.Uint64Kind: + case protoreflect.Uint64Kind: return coderUint64Value - case pref.Sfixed32Kind: + case protoreflect.Sfixed32Kind: return coderSfixed32Value - case pref.Fixed32Kind: + case protoreflect.Fixed32Kind: return coderFixed32Value - case pref.FloatKind: + case protoreflect.FloatKind: return coderFloatValue - case pref.Sfixed64Kind: + case protoreflect.Sfixed64Kind: return coderSfixed64Value - case pref.Fixed64Kind: + case protoreflect.Fixed64Kind: return coderFixed64Value - case pref.DoubleKind: + case protoreflect.DoubleKind: return coderDoubleValue - case pref.StringKind: + case protoreflect.StringKind: if strs.EnforceUTF8(fd) { return coderStringValueValidateUTF8 } return coderStringValue - case pref.BytesKind: + case protoreflect.BytesKind: return coderBytesValue - case pref.MessageKind: + case protoreflect.MessageKind: return coderMessageValue - case pref.GroupKind: + case protoreflect.GroupKind: return coderGroupValue } } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go index e118af1e2..757642e23 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index acd61bb50..11a6128ba 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -8,7 +8,7 @@ import ( "fmt" "reflect" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // unwrapper unwraps the value to the underlying value. @@ -20,13 +20,13 @@ type unwrapper interface { // A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types. type Converter interface { // PBValueOf converts a reflect.Value to a protoreflect.Value. - PBValueOf(reflect.Value) pref.Value + PBValueOf(reflect.Value) protoreflect.Value // GoValueOf converts a protoreflect.Value to a reflect.Value. - GoValueOf(pref.Value) reflect.Value + GoValueOf(protoreflect.Value) reflect.Value // IsValidPB returns whether a protoreflect.Value is compatible with this type. - IsValidPB(pref.Value) bool + IsValidPB(protoreflect.Value) bool // IsValidGo returns whether a reflect.Value is compatible with this type. IsValidGo(reflect.Value) bool @@ -34,12 +34,12 @@ type Converter interface { // New returns a new field value. // For scalars, it returns the default value of the field. // For composite types, it returns a new mutable value. - New() pref.Value + New() protoreflect.Value // Zero returns a new field value. // For scalars, it returns the default value of the field. // For composite types, it returns an immutable, empty value. 
- Zero() pref.Value + Zero() protoreflect.Value } // NewConverter matches a Go type with a protobuf field and returns a Converter @@ -50,7 +50,7 @@ type Converter interface { // This matcher deliberately supports a wider range of Go types than what // protoc-gen-go historically generated to be able to automatically wrap some // v1 messages generated by other forks of protoc-gen-go. -func NewConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { +func NewConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { switch { case fd.IsList(): return newListConverter(t, fd) @@ -76,68 +76,68 @@ var ( ) var ( - boolZero = pref.ValueOfBool(false) - int32Zero = pref.ValueOfInt32(0) - int64Zero = pref.ValueOfInt64(0) - uint32Zero = pref.ValueOfUint32(0) - uint64Zero = pref.ValueOfUint64(0) - float32Zero = pref.ValueOfFloat32(0) - float64Zero = pref.ValueOfFloat64(0) - stringZero = pref.ValueOfString("") - bytesZero = pref.ValueOfBytes(nil) + boolZero = protoreflect.ValueOfBool(false) + int32Zero = protoreflect.ValueOfInt32(0) + int64Zero = protoreflect.ValueOfInt64(0) + uint32Zero = protoreflect.ValueOfUint32(0) + uint64Zero = protoreflect.ValueOfUint64(0) + float32Zero = protoreflect.ValueOfFloat32(0) + float64Zero = protoreflect.ValueOfFloat64(0) + stringZero = protoreflect.ValueOfString("") + bytesZero = protoreflect.ValueOfBytes(nil) ) -func newSingularConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { - defVal := func(fd pref.FieldDescriptor, zero pref.Value) pref.Value { - if fd.Cardinality() == pref.Repeated { +func newSingularConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { + defVal := func(fd protoreflect.FieldDescriptor, zero protoreflect.Value) protoreflect.Value { + if fd.Cardinality() == protoreflect.Repeated { // Default isn't defined for repeated fields. 
return zero } return fd.Default() } switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: if t.Kind() == reflect.Bool { return &boolConverter{t, defVal(fd, boolZero)} } - case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: if t.Kind() == reflect.Int32 { return &int32Converter{t, defVal(fd, int32Zero)} } - case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: if t.Kind() == reflect.Int64 { return &int64Converter{t, defVal(fd, int64Zero)} } - case pref.Uint32Kind, pref.Fixed32Kind: + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: if t.Kind() == reflect.Uint32 { return &uint32Converter{t, defVal(fd, uint32Zero)} } - case pref.Uint64Kind, pref.Fixed64Kind: + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: if t.Kind() == reflect.Uint64 { return &uint64Converter{t, defVal(fd, uint64Zero)} } - case pref.FloatKind: + case protoreflect.FloatKind: if t.Kind() == reflect.Float32 { return &float32Converter{t, defVal(fd, float32Zero)} } - case pref.DoubleKind: + case protoreflect.DoubleKind: if t.Kind() == reflect.Float64 { return &float64Converter{t, defVal(fd, float64Zero)} } - case pref.StringKind: + case protoreflect.StringKind: if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { return &stringConverter{t, defVal(fd, stringZero)} } - case pref.BytesKind: + case protoreflect.BytesKind: if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { return &bytesConverter{t, defVal(fd, bytesZero)} } - case pref.EnumKind: + case protoreflect.EnumKind: // Handle enums, which must be a named int32 type. 
if t.Kind() == reflect.Int32 { return newEnumConverter(t, fd) } - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: return newMessageConverter(t) } panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) @@ -145,184 +145,184 @@ func newSingularConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { type boolConverter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *boolConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *boolConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfBool(v.Bool()) + return protoreflect.ValueOfBool(v.Bool()) } -func (c *boolConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *boolConverter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(v.Bool()).Convert(c.goType) } -func (c *boolConverter) IsValidPB(v pref.Value) bool { +func (c *boolConverter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(bool) return ok } func (c *boolConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *boolConverter) New() pref.Value { return c.def } -func (c *boolConverter) Zero() pref.Value { return c.def } +func (c *boolConverter) New() protoreflect.Value { return c.def } +func (c *boolConverter) Zero() protoreflect.Value { return c.def } type int32Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *int32Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *int32Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfInt32(int32(v.Int())) + return protoreflect.ValueOfInt32(int32(v.Int())) } -func (c *int32Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *int32Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(int32(v.Int())).Convert(c.goType) } -func (c *int32Converter) IsValidPB(v pref.Value) bool { +func (c *int32Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(int32) return ok } func (c *int32Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *int32Converter) New() pref.Value { return c.def } -func (c *int32Converter) Zero() pref.Value { return c.def } +func (c *int32Converter) New() protoreflect.Value { return c.def } +func (c *int32Converter) Zero() protoreflect.Value { return c.def } type int64Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *int64Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *int64Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfInt64(int64(v.Int())) + return protoreflect.ValueOfInt64(int64(v.Int())) } -func (c *int64Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *int64Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(int64(v.Int())).Convert(c.goType) } -func (c *int64Converter) IsValidPB(v pref.Value) bool { +func (c *int64Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(int64) return ok } func (c *int64Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c 
*int64Converter) New() pref.Value { return c.def } -func (c *int64Converter) Zero() pref.Value { return c.def } +func (c *int64Converter) New() protoreflect.Value { return c.def } +func (c *int64Converter) Zero() protoreflect.Value { return c.def } type uint32Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *uint32Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *uint32Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfUint32(uint32(v.Uint())) + return protoreflect.ValueOfUint32(uint32(v.Uint())) } -func (c *uint32Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *uint32Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(uint32(v.Uint())).Convert(c.goType) } -func (c *uint32Converter) IsValidPB(v pref.Value) bool { +func (c *uint32Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(uint32) return ok } func (c *uint32Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *uint32Converter) New() pref.Value { return c.def } -func (c *uint32Converter) Zero() pref.Value { return c.def } +func (c *uint32Converter) New() protoreflect.Value { return c.def } +func (c *uint32Converter) Zero() protoreflect.Value { return c.def } type uint64Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *uint64Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *uint64Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfUint64(uint64(v.Uint())) + return protoreflect.ValueOfUint64(uint64(v.Uint())) } -func (c *uint64Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *uint64Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(uint64(v.Uint())).Convert(c.goType) } -func (c *uint64Converter) IsValidPB(v pref.Value) bool { +func (c *uint64Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(uint64) return ok } func (c *uint64Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *uint64Converter) New() pref.Value { return c.def } -func (c *uint64Converter) Zero() pref.Value { return c.def } +func (c *uint64Converter) New() protoreflect.Value { return c.def } +func (c *uint64Converter) Zero() protoreflect.Value { return c.def } type float32Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *float32Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *float32Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfFloat32(float32(v.Float())) + return protoreflect.ValueOfFloat32(float32(v.Float())) } -func (c *float32Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *float32Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(float32(v.Float())).Convert(c.goType) } -func (c *float32Converter) IsValidPB(v pref.Value) bool { +func (c *float32Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(float32) return ok } func (c *float32Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c 
*float32Converter) New() pref.Value { return c.def } -func (c *float32Converter) Zero() pref.Value { return c.def } +func (c *float32Converter) New() protoreflect.Value { return c.def } +func (c *float32Converter) Zero() protoreflect.Value { return c.def } type float64Converter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *float64Converter) PBValueOf(v reflect.Value) pref.Value { +func (c *float64Converter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfFloat64(float64(v.Float())) + return protoreflect.ValueOfFloat64(float64(v.Float())) } -func (c *float64Converter) GoValueOf(v pref.Value) reflect.Value { +func (c *float64Converter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(float64(v.Float())).Convert(c.goType) } -func (c *float64Converter) IsValidPB(v pref.Value) bool { +func (c *float64Converter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(float64) return ok } func (c *float64Converter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *float64Converter) New() pref.Value { return c.def } -func (c *float64Converter) Zero() pref.Value { return c.def } +func (c *float64Converter) New() protoreflect.Value { return c.def } +func (c *float64Converter) Zero() protoreflect.Value { return c.def } type stringConverter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *stringConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfString(v.Convert(stringType).String()) + return protoreflect.ValueOfString(v.Convert(stringType).String()) } -func (c *stringConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value { // pref.Value.String never panics, so we go through an interface // conversion here to check the type. 
s := v.Interface().(string) @@ -331,71 +331,71 @@ func (c *stringConverter) GoValueOf(v pref.Value) reflect.Value { } return reflect.ValueOf(s).Convert(c.goType) } -func (c *stringConverter) IsValidPB(v pref.Value) bool { +func (c *stringConverter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().(string) return ok } func (c *stringConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *stringConverter) New() pref.Value { return c.def } -func (c *stringConverter) Zero() pref.Value { return c.def } +func (c *stringConverter) New() protoreflect.Value { return c.def } +func (c *stringConverter) Zero() protoreflect.Value { return c.def } type bytesConverter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func (c *bytesConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *bytesConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } if c.goType.Kind() == reflect.String && v.Len() == 0 { - return pref.ValueOfBytes(nil) // ensure empty string is []byte(nil) + return protoreflect.ValueOfBytes(nil) // ensure empty string is []byte(nil) } - return pref.ValueOfBytes(v.Convert(bytesType).Bytes()) + return protoreflect.ValueOfBytes(v.Convert(bytesType).Bytes()) } -func (c *bytesConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *bytesConverter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(v.Bytes()).Convert(c.goType) } -func (c *bytesConverter) IsValidPB(v pref.Value) bool { +func (c *bytesConverter) IsValidPB(v protoreflect.Value) bool { _, ok := v.Interface().([]byte) return ok } func (c *bytesConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *bytesConverter) New() pref.Value { return c.def } -func (c *bytesConverter) Zero() pref.Value { return c.def } +func (c *bytesConverter) New() protoreflect.Value { return c.def } +func (c *bytesConverter) Zero() protoreflect.Value { return c.def } type enumConverter struct { goType reflect.Type - def pref.Value + def protoreflect.Value } -func newEnumConverter(goType reflect.Type, fd pref.FieldDescriptor) Converter { - var def pref.Value - if fd.Cardinality() == pref.Repeated { - def = pref.ValueOfEnum(fd.Enum().Values().Get(0).Number()) +func newEnumConverter(goType reflect.Type, fd protoreflect.FieldDescriptor) Converter { + var def protoreflect.Value + if fd.Cardinality() == protoreflect.Repeated { + def = protoreflect.ValueOfEnum(fd.Enum().Values().Get(0).Number()) } else { def = fd.Default() } return &enumConverter{goType, def} } -func (c *enumConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *enumConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfEnum(pref.EnumNumber(v.Int())) + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v.Int())) } -func (c *enumConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *enumConverter) GoValueOf(v protoreflect.Value) reflect.Value { return reflect.ValueOf(v.Enum()).Convert(c.goType) } -func (c *enumConverter) IsValidPB(v pref.Value) bool { - _, ok := v.Interface().(pref.EnumNumber) +func (c *enumConverter) IsValidPB(v protoreflect.Value) bool { + _, ok := v.Interface().(protoreflect.EnumNumber) return ok } @@ -403,11 +403,11 @@ func (c *enumConverter) IsValidGo(v reflect.Value) bool { return 
v.IsValid() && v.Type() == c.goType } -func (c *enumConverter) New() pref.Value { +func (c *enumConverter) New() protoreflect.Value { return c.def } -func (c *enumConverter) Zero() pref.Value { +func (c *enumConverter) Zero() protoreflect.Value { return c.def } @@ -419,7 +419,7 @@ func newMessageConverter(goType reflect.Type) Converter { return &messageConverter{goType} } -func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *messageConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } @@ -430,13 +430,13 @@ func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value { v = reflect.Zero(reflect.PtrTo(v.Type())) } } - if m, ok := v.Interface().(pref.ProtoMessage); ok { - return pref.ValueOfMessage(m.ProtoReflect()) + if m, ok := v.Interface().(protoreflect.ProtoMessage); ok { + return protoreflect.ValueOfMessage(m.ProtoReflect()) } - return pref.ValueOfMessage(legacyWrapMessage(v)) + return protoreflect.ValueOfMessage(legacyWrapMessage(v)) } -func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *messageConverter) GoValueOf(v protoreflect.Value) reflect.Value { m := v.Message() var rv reflect.Value if u, ok := m.(unwrapper); ok { @@ -460,7 +460,7 @@ func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value { return rv } -func (c *messageConverter) IsValidPB(v pref.Value) bool { +func (c *messageConverter) IsValidPB(v protoreflect.Value) bool { m := v.Message() var rv reflect.Value if u, ok := m.(unwrapper); ok { @@ -478,14 +478,14 @@ func (c *messageConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *messageConverter) New() pref.Value { +func (c *messageConverter) New() protoreflect.Value { if c.isNonPointer() { return c.PBValueOf(reflect.New(c.goType).Elem()) } return c.PBValueOf(reflect.New(c.goType.Elem())) } -func (c *messageConverter) Zero() pref.Value { +func (c *messageConverter) Zero() protoreflect.Value { return c.PBValueOf(reflect.Zero(c.goType)) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go index 6fccab520..f89136516 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go @@ -8,10 +8,10 @@ import ( "fmt" "reflect" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) -func newListConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { +func newListConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { switch { case t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Slice: return &listPtrConverter{t, newSingularConverter(t.Elem().Elem(), fd)} @@ -26,16 +26,16 @@ type listConverter struct { c Converter } -func (c *listConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *listConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } pv := reflect.New(c.goType) pv.Elem().Set(v) - return pref.ValueOfList(&listReflect{pv, c.c}) + return protoreflect.ValueOfList(&listReflect{pv, c.c}) } -func (c *listConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *listConverter) GoValueOf(v protoreflect.Value) reflect.Value { rv := v.List().(*listReflect).v if rv.IsNil() { return reflect.Zero(c.goType) @@ -43,7 
+43,7 @@ func (c *listConverter) GoValueOf(v pref.Value) reflect.Value { return rv.Elem() } -func (c *listConverter) IsValidPB(v pref.Value) bool { +func (c *listConverter) IsValidPB(v protoreflect.Value) bool { list, ok := v.Interface().(*listReflect) if !ok { return false @@ -55,12 +55,12 @@ func (c *listConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *listConverter) New() pref.Value { - return pref.ValueOfList(&listReflect{reflect.New(c.goType), c.c}) +func (c *listConverter) New() protoreflect.Value { + return protoreflect.ValueOfList(&listReflect{reflect.New(c.goType), c.c}) } -func (c *listConverter) Zero() pref.Value { - return pref.ValueOfList(&listReflect{reflect.Zero(reflect.PtrTo(c.goType)), c.c}) +func (c *listConverter) Zero() protoreflect.Value { + return protoreflect.ValueOfList(&listReflect{reflect.Zero(reflect.PtrTo(c.goType)), c.c}) } type listPtrConverter struct { @@ -68,18 +68,18 @@ type listPtrConverter struct { c Converter } -func (c *listPtrConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *listPtrConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfList(&listReflect{v, c.c}) + return protoreflect.ValueOfList(&listReflect{v, c.c}) } -func (c *listPtrConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *listPtrConverter) GoValueOf(v protoreflect.Value) reflect.Value { return v.List().(*listReflect).v } -func (c *listPtrConverter) IsValidPB(v pref.Value) bool { +func (c *listPtrConverter) IsValidPB(v protoreflect.Value) bool { list, ok := v.Interface().(*listReflect) if !ok { return false @@ -91,11 +91,11 @@ func (c *listPtrConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *listPtrConverter) New() pref.Value { +func (c *listPtrConverter) New() protoreflect.Value { return c.PBValueOf(reflect.New(c.goType.Elem())) } -func (c *listPtrConverter) Zero() pref.Value { +func (c *listPtrConverter) Zero() protoreflect.Value { return c.PBValueOf(reflect.Zero(c.goType)) } @@ -110,16 +110,16 @@ func (ls *listReflect) Len() int { } return ls.v.Elem().Len() } -func (ls *listReflect) Get(i int) pref.Value { +func (ls *listReflect) Get(i int) protoreflect.Value { return ls.conv.PBValueOf(ls.v.Elem().Index(i)) } -func (ls *listReflect) Set(i int, v pref.Value) { +func (ls *listReflect) Set(i int, v protoreflect.Value) { ls.v.Elem().Index(i).Set(ls.conv.GoValueOf(v)) } -func (ls *listReflect) Append(v pref.Value) { +func (ls *listReflect) Append(v protoreflect.Value) { ls.v.Elem().Set(reflect.Append(ls.v.Elem(), ls.conv.GoValueOf(v))) } -func (ls *listReflect) AppendMutable() pref.Value { +func (ls *listReflect) AppendMutable() protoreflect.Value { if _, ok := ls.conv.(*messageConverter); !ok { panic("invalid AppendMutable on list with non-message type") } @@ -130,7 +130,7 @@ func (ls *listReflect) AppendMutable() pref.Value { func (ls *listReflect) Truncate(i int) { ls.v.Elem().Set(ls.v.Elem().Slice(0, i)) } -func (ls *listReflect) NewElement() pref.Value { +func (ls *listReflect) NewElement() protoreflect.Value { return ls.conv.New() } func (ls *listReflect) IsValid() bool { diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index de06b2593..f30b0a057 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ 
b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -8,7 +8,7 @@ import ( "fmt" "reflect" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type mapConverter struct { @@ -16,7 +16,7 @@ type mapConverter struct { keyConv, valConv Converter } -func newMapConverter(t reflect.Type, fd pref.FieldDescriptor) *mapConverter { +func newMapConverter(t reflect.Type, fd protoreflect.FieldDescriptor) *mapConverter { if t.Kind() != reflect.Map { panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) } @@ -27,18 +27,18 @@ func newMapConverter(t reflect.Type, fd pref.FieldDescriptor) *mapConverter { } } -func (c *mapConverter) PBValueOf(v reflect.Value) pref.Value { +func (c *mapConverter) PBValueOf(v reflect.Value) protoreflect.Value { if v.Type() != c.goType { panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) } - return pref.ValueOfMap(&mapReflect{v, c.keyConv, c.valConv}) + return protoreflect.ValueOfMap(&mapReflect{v, c.keyConv, c.valConv}) } -func (c *mapConverter) GoValueOf(v pref.Value) reflect.Value { +func (c *mapConverter) GoValueOf(v protoreflect.Value) reflect.Value { return v.Map().(*mapReflect).v } -func (c *mapConverter) IsValidPB(v pref.Value) bool { +func (c *mapConverter) IsValidPB(v protoreflect.Value) bool { mapv, ok := v.Interface().(*mapReflect) if !ok { return false @@ -50,11 +50,11 @@ func (c *mapConverter) IsValidGo(v reflect.Value) bool { return v.IsValid() && v.Type() == c.goType } -func (c *mapConverter) New() pref.Value { +func (c *mapConverter) New() protoreflect.Value { return c.PBValueOf(reflect.MakeMap(c.goType)) } -func (c *mapConverter) Zero() pref.Value { +func (c *mapConverter) Zero() protoreflect.Value { return c.PBValueOf(reflect.Zero(c.goType)) } @@ -67,29 +67,29 @@ type mapReflect struct { func (ms *mapReflect) Len() int { return ms.v.Len() } -func (ms *mapReflect) Has(k pref.MapKey) bool { +func (ms *mapReflect) Has(k protoreflect.MapKey) bool { rk := ms.keyConv.GoValueOf(k.Value()) rv := ms.v.MapIndex(rk) return rv.IsValid() } -func (ms *mapReflect) Get(k pref.MapKey) pref.Value { +func (ms *mapReflect) Get(k protoreflect.MapKey) protoreflect.Value { rk := ms.keyConv.GoValueOf(k.Value()) rv := ms.v.MapIndex(rk) if !rv.IsValid() { - return pref.Value{} + return protoreflect.Value{} } return ms.valConv.PBValueOf(rv) } -func (ms *mapReflect) Set(k pref.MapKey, v pref.Value) { +func (ms *mapReflect) Set(k protoreflect.MapKey, v protoreflect.Value) { rk := ms.keyConv.GoValueOf(k.Value()) rv := ms.valConv.GoValueOf(v) ms.v.SetMapIndex(rk, rv) } -func (ms *mapReflect) Clear(k pref.MapKey) { +func (ms *mapReflect) Clear(k protoreflect.MapKey) { rk := ms.keyConv.GoValueOf(k.Value()) ms.v.SetMapIndex(rk, reflect.Value{}) } -func (ms *mapReflect) Mutable(k pref.MapKey) pref.Value { +func (ms *mapReflect) Mutable(k protoreflect.MapKey) protoreflect.Value { if _, ok := ms.valConv.(*messageConverter); !ok { panic("invalid Mutable on map with non-message value type") } @@ -100,7 +100,7 @@ func (ms *mapReflect) Mutable(k pref.MapKey) pref.Value { } return v } -func (ms *mapReflect) Range(f func(pref.MapKey, pref.Value) bool) { +func (ms *mapReflect) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) { iter := mapRange(ms.v) for iter.Next() { k := ms.keyConv.PBValueOf(iter.Key()).MapKey() @@ -110,7 +110,7 @@ func (ms *mapReflect) Range(f func(pref.MapKey, pref.Value) bool) { } } } -func (ms *mapReflect) NewValue() pref.Value { +func (ms *mapReflect) 
NewValue() protoreflect.Value { return ms.valConv.New() } func (ms *mapReflect) IsValid() bool { diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go index 949dc49a6..cda0520c2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/decode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -12,12 +12,12 @@ import ( "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/runtime/protoiface" - piface "google.golang.org/protobuf/runtime/protoiface" ) var errDecode = errors.New("cannot parse invalid wire-format data") +var errRecursionDepth = errors.New("exceeded maximum recursion depth") type unmarshalOptions struct { flags protoiface.UnmarshalInputFlags @@ -25,6 +25,7 @@ type unmarshalOptions struct { FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) } + depth int } func (o unmarshalOptions) Options() proto.UnmarshalOptions { @@ -36,14 +37,17 @@ func (o unmarshalOptions) Options() proto.UnmarshalOptions { } } -func (o unmarshalOptions) DiscardUnknown() bool { return o.flags&piface.UnmarshalDiscardUnknown != 0 } +func (o unmarshalOptions) DiscardUnknown() bool { + return o.flags&protoiface.UnmarshalDiscardUnknown != 0 +} func (o unmarshalOptions) IsDefault() bool { - return o.flags == 0 && o.resolver == preg.GlobalTypes + return o.flags == 0 && o.resolver == protoregistry.GlobalTypes } var lazyUnmarshalOptions = unmarshalOptions{ - resolver: preg.GlobalTypes, + resolver: protoregistry.GlobalTypes, + depth: protowire.DefaultRecursionLimit, } type unmarshalOutput struct { @@ -52,7 +56,7 @@ type unmarshalOutput struct { } // unmarshal is protoreflect.Methods.Unmarshal. 
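Beyond the alias cleanup, the decode.go hunks here and below make a behavioral addition: unmarshalOptions gains a depth budget, seeded from in.Depth (or protowire.DefaultRecursionLimit for lazy unmarshaling) and decremented on every unmarshalPointer call, so hostile, deeply nested wire data now fails with errRecursionDepth instead of exhausting the stack. A toy, self-contained model of the pattern (names are illustrative, not the vendored symbols):

package main

import (
	"errors"
	"fmt"
)

// Each nested message costs one unit of the budget; the parser fails
// cleanly once the budget is spent rather than overflowing the stack.
var errTooDeep = errors.New("exceeded maximum recursion depth")

func unmarshalNested(depth int) error {
	depth--
	if depth < 0 {
		return errTooDeep
	}
	// ...decode scalar fields here, then recurse for each
	// sub-message, passing the reduced budget along:
	return unmarshalNested(depth)
}

func main() {
	// This toy always recurses, so it bottoms out at the limit.
	fmt.Println(unmarshalNested(10000))
}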
-func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { +func (mi *MessageInfo) unmarshal(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { var p pointer if ms, ok := in.Message.(*messageState); ok { p = ms.pointer() @@ -62,12 +66,13 @@ func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutp out, err := mi.unmarshalPointer(in.Buf, p, 0, unmarshalOptions{ flags: in.Flags, resolver: in.Resolver, + depth: in.Depth, }) - var flags piface.UnmarshalOutputFlags + var flags protoiface.UnmarshalOutputFlags if out.initialized { - flags |= piface.UnmarshalInitialized + flags |= protoiface.UnmarshalInitialized } - return piface.UnmarshalOutput{ + return protoiface.UnmarshalOutput{ Flags: flags, }, err } @@ -82,6 +87,10 @@ var errUnknown = errors.New("unknown") func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { mi.init() + opts.depth-- + if opts.depth < 0 { + return out, errRecursionDepth + } if flags.ProtoLegacy && mi.isMessageSet { return unmarshalMessageSet(mi, b, p, opts) } @@ -202,7 +211,7 @@ func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp p var err error xt, err = opts.resolver.FindExtensionByNumber(mi.Desc.FullName(), num) if err != nil { - if err == preg.NotFound { + if err == protoregistry.NotFound { return out, errUnknown } return out, errors.New("%v: unable to resolve extension %v: %v", mi.Desc.FullName(), num, err) diff --git a/vendor/google.golang.org/protobuf/internal/impl/enum.go b/vendor/google.golang.org/protobuf/internal/impl/enum.go index 8c1eab4bf..5f3ef5ad7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/enum.go +++ b/vendor/google.golang.org/protobuf/internal/impl/enum.go @@ -7,15 +7,15 @@ package impl import ( "reflect" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type EnumInfo struct { GoReflectType reflect.Type // int32 kind - Desc pref.EnumDescriptor + Desc protoreflect.EnumDescriptor } -func (t *EnumInfo) New(n pref.EnumNumber) pref.Enum { - return reflect.ValueOf(n).Convert(t.GoReflectType).Interface().(pref.Enum) +func (t *EnumInfo) New(n protoreflect.EnumNumber) protoreflect.Enum { + return reflect.ValueOf(n).Convert(t.GoReflectType).Interface().(protoreflect.Enum) } -func (t *EnumInfo) Descriptor() pref.EnumDescriptor { return t.Desc } +func (t *EnumInfo) Descriptor() protoreflect.EnumDescriptor { return t.Desc } diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go index e904fd993..cb25b0bae 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go @@ -9,8 +9,8 @@ import ( "sync" "sync/atomic" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // ExtensionInfo implements ExtensionType. @@ -45,7 +45,7 @@ type ExtensionInfo struct { // since the message may no longer implement the MessageV1 interface. // // Deprecated: Use the ExtendedType method instead. - ExtendedType piface.MessageV1 + ExtendedType protoiface.MessageV1 // ExtensionType is the zero value of the extension type. 
// @@ -83,31 +83,31 @@ const ( extensionInfoFullInit = 2 ) -func InitExtensionInfo(xi *ExtensionInfo, xd pref.ExtensionDescriptor, goType reflect.Type) { +func InitExtensionInfo(xi *ExtensionInfo, xd protoreflect.ExtensionDescriptor, goType reflect.Type) { xi.goType = goType xi.desc = extensionTypeDescriptor{xd, xi} xi.init = extensionInfoDescInit } -func (xi *ExtensionInfo) New() pref.Value { +func (xi *ExtensionInfo) New() protoreflect.Value { return xi.lazyInit().New() } -func (xi *ExtensionInfo) Zero() pref.Value { +func (xi *ExtensionInfo) Zero() protoreflect.Value { return xi.lazyInit().Zero() } -func (xi *ExtensionInfo) ValueOf(v interface{}) pref.Value { +func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value { return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) } -func (xi *ExtensionInfo) InterfaceOf(v pref.Value) interface{} { +func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} { return xi.lazyInit().GoValueOf(v).Interface() } -func (xi *ExtensionInfo) IsValidValue(v pref.Value) bool { +func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool { return xi.lazyInit().IsValidPB(v) } func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool { return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) } -func (xi *ExtensionInfo) TypeDescriptor() pref.ExtensionTypeDescriptor { +func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor { if atomic.LoadUint32(&xi.init) < extensionInfoDescInit { xi.lazyInitSlow() } @@ -144,13 +144,13 @@ func (xi *ExtensionInfo) lazyInitSlow() { } type extensionTypeDescriptor struct { - pref.ExtensionDescriptor + protoreflect.ExtensionDescriptor xi *ExtensionInfo } -func (xtd *extensionTypeDescriptor) Type() pref.ExtensionType { +func (xtd *extensionTypeDescriptor) Type() protoreflect.ExtensionType { return xtd.xi } -func (xtd *extensionTypeDescriptor) Descriptor() pref.ExtensionDescriptor { +func (xtd *extensionTypeDescriptor) Descriptor() protoreflect.ExtensionDescriptor { return xtd.ExtensionDescriptor } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go index f7d7ffb51..c2a803bb2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -13,13 +13,12 @@ import ( "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" ) // legacyEnumName returns the name of enums used in legacy code. // It is neither the protobuf full name nor the qualified Go name, // but rather an odd hybrid of both. -func legacyEnumName(ed pref.EnumDescriptor) string { +func legacyEnumName(ed protoreflect.EnumDescriptor) string { var protoPkg string enumName := string(ed.FullName()) if fd := ed.ParentFile(); fd != nil { @@ -34,68 +33,68 @@ func legacyEnumName(ed pref.EnumDescriptor) string { // legacyWrapEnum wraps v as a protoreflect.Enum, // where v must be a int32 kind and not implement the v2 API already. 
-func legacyWrapEnum(v reflect.Value) pref.Enum { +func legacyWrapEnum(v reflect.Value) protoreflect.Enum { et := legacyLoadEnumType(v.Type()) - return et.New(pref.EnumNumber(v.Int())) + return et.New(protoreflect.EnumNumber(v.Int())) } var legacyEnumTypeCache sync.Map // map[reflect.Type]protoreflect.EnumType // legacyLoadEnumType dynamically loads a protoreflect.EnumType for t, // where t must be an int32 kind and not implement the v2 API already. -func legacyLoadEnumType(t reflect.Type) pref.EnumType { +func legacyLoadEnumType(t reflect.Type) protoreflect.EnumType { // Fast-path: check if a EnumType is cached for this concrete type. if et, ok := legacyEnumTypeCache.Load(t); ok { - return et.(pref.EnumType) + return et.(protoreflect.EnumType) } // Slow-path: derive enum descriptor and initialize EnumType. - var et pref.EnumType + var et protoreflect.EnumType ed := LegacyLoadEnumDesc(t) et = &legacyEnumType{ desc: ed, goType: t, } if et, ok := legacyEnumTypeCache.LoadOrStore(t, et); ok { - return et.(pref.EnumType) + return et.(protoreflect.EnumType) } return et } type legacyEnumType struct { - desc pref.EnumDescriptor + desc protoreflect.EnumDescriptor goType reflect.Type m sync.Map // map[protoreflect.EnumNumber]proto.Enum } -func (t *legacyEnumType) New(n pref.EnumNumber) pref.Enum { +func (t *legacyEnumType) New(n protoreflect.EnumNumber) protoreflect.Enum { if e, ok := t.m.Load(n); ok { - return e.(pref.Enum) + return e.(protoreflect.Enum) } e := &legacyEnumWrapper{num: n, pbTyp: t, goTyp: t.goType} t.m.Store(n, e) return e } -func (t *legacyEnumType) Descriptor() pref.EnumDescriptor { +func (t *legacyEnumType) Descriptor() protoreflect.EnumDescriptor { return t.desc } type legacyEnumWrapper struct { - num pref.EnumNumber - pbTyp pref.EnumType + num protoreflect.EnumNumber + pbTyp protoreflect.EnumType goTyp reflect.Type } -func (e *legacyEnumWrapper) Descriptor() pref.EnumDescriptor { +func (e *legacyEnumWrapper) Descriptor() protoreflect.EnumDescriptor { return e.pbTyp.Descriptor() } -func (e *legacyEnumWrapper) Type() pref.EnumType { +func (e *legacyEnumWrapper) Type() protoreflect.EnumType { return e.pbTyp } -func (e *legacyEnumWrapper) Number() pref.EnumNumber { +func (e *legacyEnumWrapper) Number() protoreflect.EnumNumber { return e.num } -func (e *legacyEnumWrapper) ProtoReflect() pref.Enum { +func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum { return e } func (e *legacyEnumWrapper) protoUnwrap() interface{} { @@ -105,8 +104,8 @@ func (e *legacyEnumWrapper) protoUnwrap() interface{} { } var ( - _ pref.Enum = (*legacyEnumWrapper)(nil) - _ unwrapper = (*legacyEnumWrapper)(nil) + _ protoreflect.Enum = (*legacyEnumWrapper)(nil) + _ unwrapper = (*legacyEnumWrapper)(nil) ) var legacyEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor @@ -115,15 +114,15 @@ var legacyEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor // which must be an int32 kind and not implement the v2 API already. // // This is exported for testing purposes. -func LegacyLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { +func LegacyLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor { // Fast-path: check if an EnumDescriptor is cached for this concrete type. if ed, ok := legacyEnumDescCache.Load(t); ok { - return ed.(pref.EnumDescriptor) + return ed.(protoreflect.EnumDescriptor) } // Slow-path: initialize EnumDescriptor from the raw descriptor. 
ev := reflect.Zero(t).Interface() - if _, ok := ev.(pref.Enum); ok { + if _, ok := ev.(protoreflect.Enum); ok { panic(fmt.Sprintf("%v already implements proto.Enum", t)) } edV1, ok := ev.(enumV1) @@ -132,7 +131,7 @@ func LegacyLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { } b, idxs := edV1.EnumDescriptor() - var ed pref.EnumDescriptor + var ed protoreflect.EnumDescriptor if len(idxs) == 1 { ed = legacyLoadFileDesc(b).Enums().Get(idxs[0]) } else { @@ -158,10 +157,10 @@ var aberrantEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescript // We are unable to use the global enum registry since it is // unfortunately keyed by the protobuf full name, which we also do not know. // Thus, this produces some bogus enum descriptor based on the Go type name. -func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { +func aberrantLoadEnumDesc(t reflect.Type) protoreflect.EnumDescriptor { // Fast-path: check if an EnumDescriptor is cached for this concrete type. if ed, ok := aberrantEnumDescCache.Load(t); ok { - return ed.(pref.EnumDescriptor) + return ed.(protoreflect.EnumDescriptor) } // Slow-path: construct a bogus, but unique EnumDescriptor. @@ -182,7 +181,7 @@ func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { // An exhaustive query is clearly impractical, but can be best-effort. if ed, ok := aberrantEnumDescCache.LoadOrStore(t, ed); ok { - return ed.(pref.EnumDescriptor) + return ed.(protoreflect.EnumDescriptor) } return ed } @@ -192,7 +191,7 @@ func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { // It should be sufficiently unique within a program. // // This is exported for testing purposes. -func AberrantDeriveFullName(t reflect.Type) pref.FullName { +func AberrantDeriveFullName(t reflect.Type) protoreflect.FullName { sanitize := func(r rune) rune { switch { case r == '/': @@ -215,5 +214,5 @@ func AberrantDeriveFullName(t reflect.Type) pref.FullName { ss[i] = "x" + s } } - return pref.FullName(strings.Join(ss, ".")) + return protoreflect.FullName(strings.Join(ss, ".")) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go index e3fb0b578..9b64ad5bb 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go @@ -12,21 +12,21 @@ import ( "reflect" "google.golang.org/protobuf/internal/errors" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // These functions exist to support exported APIs in generated protobufs. // While these are deprecated, they cannot be removed for compatibility reasons. // LegacyEnumName returns the name of enums used in legacy code. -func (Export) LegacyEnumName(ed pref.EnumDescriptor) string { +func (Export) LegacyEnumName(ed protoreflect.EnumDescriptor) string { return legacyEnumName(ed) } // LegacyMessageTypeOf returns the protoreflect.MessageType for m, // with name used as the message name if necessary. 
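UnmarshalJSONEnum, just below, keeps its existing contract under the new type names: the JSON form of an enum may be either the quoted value name or the bare number, distinguished by peeking at the first byte. A standalone sketch of that dispatch (decodeEnum and the byName map are illustrative, not the vendored API):

package main

import (
	"encoding/json"
	"fmt"
)

// decodeEnum mirrors the dispatch below: a leading '"' means the enum
// is spelled by value name; anything else is parsed as a number.
func decodeEnum(b []byte, byName map[string]int32) (int32, error) {
	if len(b) > 0 && b[0] == '"' {
		var name string
		if err := json.Unmarshal(b, &name); err != nil {
			return 0, err
		}
		n, ok := byName[name]
		if !ok {
			return 0, fmt.Errorf("unknown enum value %q", name)
		}
		return n, nil
	}
	var n int32
	err := json.Unmarshal(b, &n)
	return n, err
}

func main() {
	names := map[string]int32{"SYNTAX_PROTO3": 1}
	fmt.Println(decodeEnum([]byte(`"SYNTAX_PROTO3"`), names)) // 1 <nil>
	fmt.Println(decodeEnum([]byte(`1`), names))               // 1 <nil>
}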
-func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.MessageType { +func (Export) LegacyMessageTypeOf(m protoiface.MessageV1, name protoreflect.FullName) protoreflect.MessageType { if mv := (Export{}).protoMessageV2Of(m); mv != nil { return mv.ProtoReflect().Type() } @@ -36,9 +36,9 @@ func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.M // UnmarshalJSONEnum unmarshals an enum from a JSON-encoded input. // The input can either be a string representing the enum value by name, // or a number representing the enum number itself. -func (Export) UnmarshalJSONEnum(ed pref.EnumDescriptor, b []byte) (pref.EnumNumber, error) { +func (Export) UnmarshalJSONEnum(ed protoreflect.EnumDescriptor, b []byte) (protoreflect.EnumNumber, error) { if b[0] == '"' { - var name pref.Name + var name protoreflect.Name if err := json.Unmarshal(b, &name); err != nil { return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) } @@ -48,7 +48,7 @@ func (Export) UnmarshalJSONEnum(ed pref.EnumDescriptor, b []byte) (pref.EnumNumb } return ev.Number(), nil } else { - var num pref.EnumNumber + var num protoreflect.EnumNumber if err := json.Unmarshal(b, &num); err != nil { return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) } @@ -81,8 +81,8 @@ func (Export) CompressGZIP(in []byte) (out []byte) { blockHeader[0] = 0x01 // final bit per RFC 1951, section 3.2.3. blockSize = len(in) } - binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize)^0x0000) - binary.LittleEndian.PutUint16(blockHeader[3:5], uint16(blockSize)^0xffff) + binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize)) + binary.LittleEndian.PutUint16(blockHeader[3:5], ^uint16(blockSize)) out = append(out, blockHeader[:]...) out = append(out, in[:blockSize]...) in = in[blockSize:] diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 49e723161..87b30d050 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -12,16 +12,16 @@ import ( ptag "google.golang.org/protobuf/internal/encoding/tag" "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/pragma" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" ) func (xi *ExtensionInfo) initToLegacy() { xd := xi.desc - var parent piface.MessageV1 + var parent protoiface.MessageV1 messageName := xd.ContainingMessage().FullName() - if mt, _ := preg.GlobalTypes.FindMessageByName(messageName); mt != nil { + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(messageName); mt != nil { // Create a new parent message and unwrap it if possible. mv := mt.New().Interface() t := reflect.TypeOf(mv) @@ -31,7 +31,7 @@ func (xi *ExtensionInfo) initToLegacy() { // Check whether the message implements the legacy v1 Message interface. mz := reflect.Zero(t).Interface() - if mz, ok := mz.(piface.MessageV1); ok { + if mz, ok := mz.(protoiface.MessageV1); ok { parent = mz } } @@ -46,7 +46,7 @@ func (xi *ExtensionInfo) initToLegacy() { // Reconstruct the legacy enum full name. 
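The CompressGZIP hunk above is behavior-preserving cleanup, not a bug fix: for a uint16, v ^ 0x0000 is just v, and v ^ 0xffff equals ^v, so the stored-block header still carries LEN followed by its one's complement NLEN as RFC 1951, section 3.2.4 requires; the new spelling only makes the complement explicit. A standalone check:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// XOR with 0xffff and the unary complement agree for uint16,
	// so the rewritten lines emit identical bytes.
	v := uint16(1024)
	fmt.Println(v^0xffff == ^v) // true

	// Stored-block header per RFC 1951, section 3.2.4: one flag byte
	// (BFINAL=1, BTYPE=00), then LEN little-endian, then NLEN = ^LEN.
	var hdr [5]byte
	hdr[0] = 0x01
	binary.LittleEndian.PutUint16(hdr[1:3], v)
	binary.LittleEndian.PutUint16(hdr[3:5], ^v)
	fmt.Printf("% x\n", hdr) // 01 00 04 ff fb
}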
var enumName string - if xd.Kind() == pref.EnumKind { + if xd.Kind() == protoreflect.EnumKind { enumName = legacyEnumName(xd.Enum()) } @@ -77,16 +77,16 @@ func (xi *ExtensionInfo) initFromLegacy() { // field number is specified. In such a case, use a placeholder. if xi.ExtendedType == nil || xi.ExtensionType == nil { xd := placeholderExtension{ - name: pref.FullName(xi.Name), - number: pref.FieldNumber(xi.Field), + name: protoreflect.FullName(xi.Name), + number: protoreflect.FieldNumber(xi.Field), } xi.desc = extensionTypeDescriptor{xd, xi} return } // Resolve enum or message dependencies. - var ed pref.EnumDescriptor - var md pref.MessageDescriptor + var ed protoreflect.EnumDescriptor + var md protoreflect.MessageDescriptor t := reflect.TypeOf(xi.ExtensionType) isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 @@ -94,18 +94,18 @@ func (xi *ExtensionInfo) initFromLegacy() { t = t.Elem() } switch v := reflect.Zero(t).Interface().(type) { - case pref.Enum: + case protoreflect.Enum: ed = v.Descriptor() case enumV1: ed = LegacyLoadEnumDesc(t) - case pref.ProtoMessage: + case protoreflect.ProtoMessage: md = v.ProtoReflect().Descriptor() case messageV1: md = LegacyLoadMessageDesc(t) } // Derive basic field information from the struct tag. - var evs pref.EnumValueDescriptors + var evs protoreflect.EnumValueDescriptors if ed != nil { evs = ed.Values() } @@ -114,8 +114,8 @@ func (xi *ExtensionInfo) initFromLegacy() { // Construct a v2 ExtensionType. xd := &filedesc.Extension{L2: new(filedesc.ExtensionL2)} xd.L0.ParentFile = filedesc.SurrogateProto2 - xd.L0.FullName = pref.FullName(xi.Name) - xd.L1.Number = pref.FieldNumber(xi.Field) + xd.L0.FullName = protoreflect.FullName(xi.Name) + xd.L1.Number = protoreflect.FieldNumber(xi.Field) xd.L1.Cardinality = fd.L1.Cardinality xd.L1.Kind = fd.L1.Kind xd.L2.IsPacked = fd.L1.IsPacked @@ -138,39 +138,39 @@ func (xi *ExtensionInfo) initFromLegacy() { } type placeholderExtension struct { - name pref.FullName - number pref.FieldNumber + name protoreflect.FullName + number protoreflect.FieldNumber } -func (x placeholderExtension) ParentFile() pref.FileDescriptor { return nil } -func (x placeholderExtension) Parent() pref.Descriptor { return nil } -func (x placeholderExtension) Index() int { return 0 } -func (x placeholderExtension) Syntax() pref.Syntax { return 0 } -func (x placeholderExtension) Name() pref.Name { return x.name.Name() } -func (x placeholderExtension) FullName() pref.FullName { return x.name } -func (x placeholderExtension) IsPlaceholder() bool { return true } -func (x placeholderExtension) Options() pref.ProtoMessage { return descopts.Field } -func (x placeholderExtension) Number() pref.FieldNumber { return x.number } -func (x placeholderExtension) Cardinality() pref.Cardinality { return 0 } -func (x placeholderExtension) Kind() pref.Kind { return 0 } -func (x placeholderExtension) HasJSONName() bool { return false } -func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" } -func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" } -func (x placeholderExtension) HasPresence() bool { return false } -func (x placeholderExtension) HasOptionalKeyword() bool { return false } -func (x placeholderExtension) IsExtension() bool { return true } -func (x placeholderExtension) IsWeak() bool { return false } -func (x placeholderExtension) IsPacked() bool { return false } -func (x placeholderExtension) 
IsList() bool { return false } -func (x placeholderExtension) IsMap() bool { return false } -func (x placeholderExtension) MapKey() pref.FieldDescriptor { return nil } -func (x placeholderExtension) MapValue() pref.FieldDescriptor { return nil } -func (x placeholderExtension) HasDefault() bool { return false } -func (x placeholderExtension) Default() pref.Value { return pref.Value{} } -func (x placeholderExtension) DefaultEnumValue() pref.EnumValueDescriptor { return nil } -func (x placeholderExtension) ContainingOneof() pref.OneofDescriptor { return nil } -func (x placeholderExtension) ContainingMessage() pref.MessageDescriptor { return nil } -func (x placeholderExtension) Enum() pref.EnumDescriptor { return nil } -func (x placeholderExtension) Message() pref.MessageDescriptor { return nil } -func (x placeholderExtension) ProtoType(pref.FieldDescriptor) { return } -func (x placeholderExtension) ProtoInternal(pragma.DoNotImplement) { return } +func (x placeholderExtension) ParentFile() protoreflect.FileDescriptor { return nil } +func (x placeholderExtension) Parent() protoreflect.Descriptor { return nil } +func (x placeholderExtension) Index() int { return 0 } +func (x placeholderExtension) Syntax() protoreflect.Syntax { return 0 } +func (x placeholderExtension) Name() protoreflect.Name { return x.name.Name() } +func (x placeholderExtension) FullName() protoreflect.FullName { return x.name } +func (x placeholderExtension) IsPlaceholder() bool { return true } +func (x placeholderExtension) Options() protoreflect.ProtoMessage { return descopts.Field } +func (x placeholderExtension) Number() protoreflect.FieldNumber { return x.number } +func (x placeholderExtension) Cardinality() protoreflect.Cardinality { return 0 } +func (x placeholderExtension) Kind() protoreflect.Kind { return 0 } +func (x placeholderExtension) HasJSONName() bool { return false } +func (x placeholderExtension) JSONName() string { return "[" + string(x.name) + "]" } +func (x placeholderExtension) TextName() string { return "[" + string(x.name) + "]" } +func (x placeholderExtension) HasPresence() bool { return false } +func (x placeholderExtension) HasOptionalKeyword() bool { return false } +func (x placeholderExtension) IsExtension() bool { return true } +func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsPacked() bool { return false } +func (x placeholderExtension) IsList() bool { return false } +func (x placeholderExtension) IsMap() bool { return false } +func (x placeholderExtension) MapKey() protoreflect.FieldDescriptor { return nil } +func (x placeholderExtension) MapValue() protoreflect.FieldDescriptor { return nil } +func (x placeholderExtension) HasDefault() bool { return false } +func (x placeholderExtension) Default() protoreflect.Value { return protoreflect.Value{} } +func (x placeholderExtension) DefaultEnumValue() protoreflect.EnumValueDescriptor { return nil } +func (x placeholderExtension) ContainingOneof() protoreflect.OneofDescriptor { return nil } +func (x placeholderExtension) ContainingMessage() protoreflect.MessageDescriptor { return nil } +func (x placeholderExtension) Enum() protoreflect.EnumDescriptor { return nil } +func (x placeholderExtension) Message() protoreflect.MessageDescriptor { return nil } +func (x placeholderExtension) ProtoType(protoreflect.FieldDescriptor) { return } +func (x placeholderExtension) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go 
b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index 029feeefd..61c483fac 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -16,14 +16,12 @@ import ( "google.golang.org/protobuf/internal/filedesc" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/runtime/protoiface" - piface "google.golang.org/protobuf/runtime/protoiface" ) // legacyWrapMessage wraps v as a protoreflect.Message, // where v must be a *struct kind and not implement the v2 API already. -func legacyWrapMessage(v reflect.Value) pref.Message { +func legacyWrapMessage(v reflect.Value) protoreflect.Message { t := v.Type() if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { return aberrantMessage{v: v} @@ -35,7 +33,7 @@ func legacyWrapMessage(v reflect.Value) pref.Message { // legacyLoadMessageType dynamically loads a protoreflect.Type for t, // where t must be not implement the v2 API already. // The provided name is used if it cannot be determined from the message. -func legacyLoadMessageType(t reflect.Type, name pref.FullName) protoreflect.MessageType { +func legacyLoadMessageType(t reflect.Type, name protoreflect.FullName) protoreflect.MessageType { if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { return aberrantMessageType{t} } @@ -47,7 +45,7 @@ var legacyMessageTypeCache sync.Map // map[reflect.Type]*MessageInfo // legacyLoadMessageInfo dynamically loads a *MessageInfo for t, // where t must be a *struct kind and not implement the v2 API already. // The provided name is used if it cannot be determined from the message. -func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { +func legacyLoadMessageInfo(t reflect.Type, name protoreflect.FullName) *MessageInfo { // Fast-path: check if a MessageInfo is cached for this concrete type. if mt, ok := legacyMessageTypeCache.Load(t); ok { return mt.(*MessageInfo) @@ -68,7 +66,7 @@ func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { // supports deterministic serialization or not, but this // preserves the v1 implementation's behavior of always // calling Marshal methods when present. - mi.methods.Flags |= piface.SupportMarshalDeterministic + mi.methods.Flags |= protoiface.SupportMarshalDeterministic } if _, hasUnmarshal = v.(legacyUnmarshaler); hasUnmarshal { mi.methods.Unmarshal = legacyUnmarshal @@ -89,18 +87,18 @@ var legacyMessageDescCache sync.Map // map[reflect.Type]protoreflect.MessageDesc // which should be a *struct kind and must not implement the v2 API already. // // This is exported for testing purposes. -func LegacyLoadMessageDesc(t reflect.Type) pref.MessageDescriptor { +func LegacyLoadMessageDesc(t reflect.Type) protoreflect.MessageDescriptor { return legacyLoadMessageDesc(t, "") } -func legacyLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor { +func legacyLoadMessageDesc(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor { // Fast-path: check if a MessageDescriptor is cached for this concrete type. if mi, ok := legacyMessageDescCache.Load(t); ok { - return mi.(pref.MessageDescriptor) + return mi.(protoreflect.MessageDescriptor) } // Slow-path: initialize MessageDescriptor from the raw descriptor. 
mv := reflect.Zero(t).Interface() - if _, ok := mv.(pref.ProtoMessage); ok { + if _, ok := mv.(protoreflect.ProtoMessage); ok { panic(fmt.Sprintf("%v already implements proto.Message", t)) } mdV1, ok := mv.(messageV1) @@ -164,7 +162,7 @@ var ( // // This is a best-effort derivation of the message descriptor using the protobuf // tags on the struct fields. -func aberrantLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor { +func aberrantLoadMessageDesc(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor { aberrantMessageDescLock.Lock() defer aberrantMessageDescLock.Unlock() if aberrantMessageDescCache == nil { @@ -172,7 +170,7 @@ func aberrantLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDes } return aberrantLoadMessageDescReentrant(t, name) } -func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.MessageDescriptor { +func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName) protoreflect.MessageDescriptor { // Fast-path: check if an MessageDescriptor is cached for this concrete type. if md, ok := aberrantMessageDescCache[t]; ok { return md @@ -225,9 +223,9 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.M vs := fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0] for i := 0; i < vs.Len(); i++ { v := vs.Index(i) - md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]pref.FieldNumber{ - pref.FieldNumber(v.FieldByName("Start").Int()), - pref.FieldNumber(v.FieldByName("End").Int() + 1), + md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]protoreflect.FieldNumber{ + protoreflect.FieldNumber(v.FieldByName("Start").Int()), + protoreflect.FieldNumber(v.FieldByName("End").Int() + 1), }) md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, nil) } @@ -245,7 +243,7 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.M n := len(md.L2.Oneofs.List) md.L2.Oneofs.List = append(md.L2.Oneofs.List, filedesc.Oneof{}) od := &md.L2.Oneofs.List[n] - od.L0.FullName = md.FullName().Append(pref.Name(tag)) + od.L0.FullName = md.FullName().Append(protoreflect.Name(tag)) od.L0.ParentFile = md.L0.ParentFile od.L0.Parent = md od.L0.Index = n @@ -267,14 +265,14 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.M return md } -func aberrantDeriveMessageName(t reflect.Type, name pref.FullName) pref.FullName { +func aberrantDeriveMessageName(t reflect.Type, name protoreflect.FullName) protoreflect.FullName { if name.IsValid() { return name } func() { defer func() { recover() }() // swallow possible nil panics if m, ok := reflect.Zero(t).Interface().(interface{ XXX_MessageName() string }); ok { - name = pref.FullName(m.XXX_MessageName()) + name = protoreflect.FullName(m.XXX_MessageName()) } }() if name.IsValid() { @@ -305,7 +303,7 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, fd.L0.Index = n if fd.L1.IsWeak || fd.L1.HasPacked { - fd.L1.Options = func() pref.ProtoMessage { + fd.L1.Options = func() protoreflect.ProtoMessage { opts := descopts.Field.ProtoReflect().New() if fd.L1.IsWeak { opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) @@ -318,17 +316,17 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, } // Populate Enum and Message. 
- if fd.Enum() == nil && fd.Kind() == pref.EnumKind { + if fd.Enum() == nil && fd.Kind() == protoreflect.EnumKind { switch v := reflect.Zero(t).Interface().(type) { - case pref.Enum: + case protoreflect.Enum: fd.L1.Enum = v.Descriptor() default: fd.L1.Enum = LegacyLoadEnumDesc(t) } } - if fd.Message() == nil && (fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind) { + if fd.Message() == nil && (fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind) { switch v := reflect.Zero(t).Interface().(type) { - case pref.ProtoMessage: + case protoreflect.ProtoMessage: fd.L1.Message = v.ProtoReflect().Descriptor() case messageV1: fd.L1.Message = LegacyLoadMessageDesc(t) @@ -337,13 +335,13 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, n := len(md.L1.Messages.List) md.L1.Messages.List = append(md.L1.Messages.List, filedesc.Message{L2: new(filedesc.MessageL2)}) md2 := &md.L1.Messages.List[n] - md2.L0.FullName = md.FullName().Append(pref.Name(strs.MapEntryName(string(fd.Name())))) + md2.L0.FullName = md.FullName().Append(protoreflect.Name(strs.MapEntryName(string(fd.Name())))) md2.L0.ParentFile = md.L0.ParentFile md2.L0.Parent = md md2.L0.Index = n md2.L1.IsMapEntry = true - md2.L2.Options = func() pref.ProtoMessage { + md2.L2.Options = func() protoreflect.ProtoMessage { opts := descopts.Message.ProtoReflect().New() opts.Set(opts.Descriptor().Fields().ByName("map_entry"), protoreflect.ValueOfBool(true)) return opts.Interface() @@ -364,8 +362,8 @@ type placeholderEnumValues struct { protoreflect.EnumValueDescriptors } -func (placeholderEnumValues) ByNumber(n pref.EnumNumber) pref.EnumValueDescriptor { - return filedesc.PlaceholderEnumValue(pref.FullName(fmt.Sprintf("UNKNOWN_%d", n))) +func (placeholderEnumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor { + return filedesc.PlaceholderEnumValue(protoreflect.FullName(fmt.Sprintf("UNKNOWN_%d", n))) } // legacyMarshaler is the proto.Marshaler interface superseded by protoiface.Methoder. @@ -383,7 +381,7 @@ type legacyMerger interface { Merge(protoiface.MessageV1) } -var aberrantProtoMethods = &piface.Methods{ +var aberrantProtoMethods = &protoiface.Methods{ Marshal: legacyMarshal, Unmarshal: legacyUnmarshal, Merge: legacyMerge, @@ -392,40 +390,40 @@ var aberrantProtoMethods = &piface.Methods{ // supports deterministic serialization or not, but this // preserves the v1 implementation's behavior of always // calling Marshal methods when present. - Flags: piface.SupportMarshalDeterministic, + Flags: protoiface.SupportMarshalDeterministic, } -func legacyMarshal(in piface.MarshalInput) (piface.MarshalOutput, error) { +func legacyMarshal(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) { v := in.Message.(unwrapper).protoUnwrap() marshaler, ok := v.(legacyMarshaler) if !ok { - return piface.MarshalOutput{}, errors.New("%T does not implement Marshal", v) + return protoiface.MarshalOutput{}, errors.New("%T does not implement Marshal", v) } out, err := marshaler.Marshal() if in.Buf != nil { out = append(in.Buf, out...) 
} - return piface.MarshalOutput{ + return protoiface.MarshalOutput{ Buf: out, }, err } -func legacyUnmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { +func legacyUnmarshal(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { v := in.Message.(unwrapper).protoUnwrap() unmarshaler, ok := v.(legacyUnmarshaler) if !ok { - return piface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v) + return protoiface.UnmarshalOutput{}, errors.New("%T does not implement Unmarshal", v) } - return piface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf) + return protoiface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf) } -func legacyMerge(in piface.MergeInput) piface.MergeOutput { +func legacyMerge(in protoiface.MergeInput) protoiface.MergeOutput { // Check whether this supports the legacy merger. dstv := in.Destination.(unwrapper).protoUnwrap() merger, ok := dstv.(legacyMerger) if ok { merger.Merge(Export{}.ProtoMessageV1Of(in.Source)) - return piface.MergeOutput{Flags: piface.MergeComplete} + return protoiface.MergeOutput{Flags: protoiface.MergeComplete} } // If legacy merger is unavailable, implement merge in terms of @@ -433,29 +431,29 @@ func legacyMerge(in piface.MergeInput) piface.MergeOutput { srcv := in.Source.(unwrapper).protoUnwrap() marshaler, ok := srcv.(legacyMarshaler) if !ok { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } dstv = in.Destination.(unwrapper).protoUnwrap() unmarshaler, ok := dstv.(legacyUnmarshaler) if !ok { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } if !in.Source.IsValid() { // Legacy Marshal methods may not function on nil messages. // Check for a typed nil source only after we confirm that // legacy Marshal/Unmarshal methods are present, for // consistency. - return piface.MergeOutput{Flags: piface.MergeComplete} + return protoiface.MergeOutput{Flags: protoiface.MergeComplete} } b, err := marshaler.Marshal() if err != nil { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } err = unmarshaler.Unmarshal(b) if err != nil { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } - return piface.MergeOutput{Flags: piface.MergeComplete} + return protoiface.MergeOutput{Flags: protoiface.MergeComplete} } // aberrantMessageType implements MessageType for all types other than pointer-to-struct. 
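
The legacyMerge fallback above reduces merging to a marshal/unmarshal round trip when the destination has no Merge method. The same idea can be sketched against the public proto package; mergeByRoundTrip is an illustrative helper, not the vendored implementation, and it assumes UnmarshalOptions{Merge: true} preserves fields already set in dst:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/known/structpb"
    )

    // mergeByRoundTrip emulates proto merge semantics for messages that only
    // expose Marshal/Unmarshal: serialize src, then unmarshal into dst with
    // Merge set so existing fields in dst are kept.
    func mergeByRoundTrip(dst, src proto.Message) error {
        b, err := proto.Marshal(src)
        if err != nil {
            return err
        }
        return proto.UnmarshalOptions{Merge: true}.Unmarshal(b, dst)
    }

    func main() {
        dst, _ := structpb.NewStruct(map[string]interface{}{"a": 1})
        src, _ := structpb.NewStruct(map[string]interface{}{"b": 2})
        if err := mergeByRoundTrip(dst, src); err != nil {
            panic(err)
        }
        fmt.Println(len(dst.Fields)) // 2: dst kept "a" and gained "b"
    }

Note that the vendored version returns an empty MergeOutput (no MergeComplete flag) when either side lacks the legacy methods, signalling the caller to fall back to reflection-based merging instead.
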
@@ -463,19 +461,19 @@ type aberrantMessageType struct { t reflect.Type } -func (mt aberrantMessageType) New() pref.Message { +func (mt aberrantMessageType) New() protoreflect.Message { if mt.t.Kind() == reflect.Ptr { return aberrantMessage{reflect.New(mt.t.Elem())} } return aberrantMessage{reflect.Zero(mt.t)} } -func (mt aberrantMessageType) Zero() pref.Message { +func (mt aberrantMessageType) Zero() protoreflect.Message { return aberrantMessage{reflect.Zero(mt.t)} } func (mt aberrantMessageType) GoType() reflect.Type { return mt.t } -func (mt aberrantMessageType) Descriptor() pref.MessageDescriptor { +func (mt aberrantMessageType) Descriptor() protoreflect.MessageDescriptor { return LegacyLoadMessageDesc(mt.t) } @@ -499,56 +497,56 @@ func (m aberrantMessage) Reset() { } } -func (m aberrantMessage) ProtoReflect() pref.Message { +func (m aberrantMessage) ProtoReflect() protoreflect.Message { return m } -func (m aberrantMessage) Descriptor() pref.MessageDescriptor { +func (m aberrantMessage) Descriptor() protoreflect.MessageDescriptor { return LegacyLoadMessageDesc(m.v.Type()) } -func (m aberrantMessage) Type() pref.MessageType { +func (m aberrantMessage) Type() protoreflect.MessageType { return aberrantMessageType{m.v.Type()} } -func (m aberrantMessage) New() pref.Message { +func (m aberrantMessage) New() protoreflect.Message { if m.v.Type().Kind() == reflect.Ptr { return aberrantMessage{reflect.New(m.v.Type().Elem())} } return aberrantMessage{reflect.Zero(m.v.Type())} } -func (m aberrantMessage) Interface() pref.ProtoMessage { +func (m aberrantMessage) Interface() protoreflect.ProtoMessage { return m } -func (m aberrantMessage) Range(f func(pref.FieldDescriptor, pref.Value) bool) { +func (m aberrantMessage) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { return } -func (m aberrantMessage) Has(pref.FieldDescriptor) bool { +func (m aberrantMessage) Has(protoreflect.FieldDescriptor) bool { return false } -func (m aberrantMessage) Clear(pref.FieldDescriptor) { +func (m aberrantMessage) Clear(protoreflect.FieldDescriptor) { panic("invalid Message.Clear on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) Get(fd pref.FieldDescriptor) pref.Value { +func (m aberrantMessage) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { if fd.Default().IsValid() { return fd.Default() } panic("invalid Message.Get on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) Set(pref.FieldDescriptor, pref.Value) { +func (m aberrantMessage) Set(protoreflect.FieldDescriptor, protoreflect.Value) { panic("invalid Message.Set on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) Mutable(pref.FieldDescriptor) pref.Value { +func (m aberrantMessage) Mutable(protoreflect.FieldDescriptor) protoreflect.Value { panic("invalid Message.Mutable on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) NewField(pref.FieldDescriptor) pref.Value { +func (m aberrantMessage) NewField(protoreflect.FieldDescriptor) protoreflect.Value { panic("invalid Message.NewField on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) WhichOneof(pref.OneofDescriptor) pref.FieldDescriptor { +func (m aberrantMessage) WhichOneof(protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { panic("invalid Message.WhichOneof descriptor on " + string(m.Descriptor().FullName())) } -func (m aberrantMessage) GetUnknown() pref.RawFields { +func (m aberrantMessage) GetUnknown() protoreflect.RawFields { return nil } -func (m aberrantMessage) 
SetUnknown(pref.RawFields) { +func (m aberrantMessage) SetUnknown(protoreflect.RawFields) { // SetUnknown discards its input on messages which don't support unknown field storage. } func (m aberrantMessage) IsValid() bool { @@ -557,7 +555,7 @@ func (m aberrantMessage) IsValid() bool { } return false } -func (m aberrantMessage) ProtoMethods() *piface.Methods { +func (m aberrantMessage) ProtoMethods() *protoiface.Methods { return aberrantProtoMethods } func (m aberrantMessage) protoUnwrap() interface{} { diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go index c65bbc044..7e65f64f2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/merge.go +++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go @@ -9,8 +9,8 @@ import ( "reflect" "google.golang.org/protobuf/proto" - pref "google.golang.org/protobuf/reflect/protoreflect" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) type mergeOptions struct{} @@ -20,17 +20,17 @@ func (o mergeOptions) Merge(dst, src proto.Message) { } // merge is protoreflect.Methods.Merge. -func (mi *MessageInfo) merge(in piface.MergeInput) piface.MergeOutput { +func (mi *MessageInfo) merge(in protoiface.MergeInput) protoiface.MergeOutput { dp, ok := mi.getPointer(in.Destination) if !ok { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } sp, ok := mi.getPointer(in.Source) if !ok { - return piface.MergeOutput{} + return protoiface.MergeOutput{} } mi.mergePointer(dp, sp, mergeOptions{}) - return piface.MergeOutput{Flags: piface.MergeComplete} + return protoiface.MergeOutput{Flags: protoiface.MergeComplete} } func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { @@ -64,7 +64,7 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { continue } dx := (*dext)[num] - var dv pref.Value + var dv protoreflect.Value if dx.Type() == sx.Type() { dv = dx.Value() } @@ -85,15 +85,15 @@ func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { } } -func mergeScalarValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeScalarValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { return src } -func mergeBytesValue(dst, src pref.Value, opts mergeOptions) pref.Value { - return pref.ValueOfBytes(append(emptyBuf[:], src.Bytes()...)) +func mergeBytesValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { + return protoreflect.ValueOfBytes(append(emptyBuf[:], src.Bytes()...)) } -func mergeListValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeListValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { dstl := dst.List() srcl := src.List() for i, llen := 0, srcl.Len(); i < llen; i++ { @@ -102,29 +102,29 @@ func mergeListValue(dst, src pref.Value, opts mergeOptions) pref.Value { return dst } -func mergeBytesListValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeBytesListValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { dstl := dst.List() srcl := src.List() for i, llen := 0, srcl.Len(); i < llen; i++ { sb := srcl.Get(i).Bytes() db := append(emptyBuf[:], sb...) 
- dstl.Append(pref.ValueOfBytes(db)) + dstl.Append(protoreflect.ValueOfBytes(db)) } return dst } -func mergeMessageListValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeMessageListValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { dstl := dst.List() srcl := src.List() for i, llen := 0, srcl.Len(); i < llen; i++ { sm := srcl.Get(i).Message() dm := proto.Clone(sm.Interface()).ProtoReflect() - dstl.Append(pref.ValueOfMessage(dm)) + dstl.Append(protoreflect.ValueOfMessage(dm)) } return dst } -func mergeMessageValue(dst, src pref.Value, opts mergeOptions) pref.Value { +func mergeMessageValue(dst, src protoreflect.Value, opts mergeOptions) protoreflect.Value { opts.Merge(dst.Message().Interface(), src.Message().Interface()) return dst } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index a104e28e8..4f5fb67a0 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -14,8 +14,7 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/reflect/protoregistry" ) // MessageInfo provides protobuf related functionality for a given Go type @@ -29,7 +28,7 @@ type MessageInfo struct { GoReflectType reflect.Type // pointer to struct // Desc is the underlying message descriptor type and must be populated. - Desc pref.MessageDescriptor + Desc protoreflect.MessageDescriptor // Exporter must be provided in a purego environment in order to provide // access to unexported fields. @@ -54,7 +53,7 @@ type exporter func(v interface{}, i int) interface{} // is generated by our implementation of protoc-gen-go (for v2 and on). // If it is unable to obtain a MessageInfo, it returns nil. func getMessageInfo(mt reflect.Type) *MessageInfo { - m, ok := reflect.Zero(mt).Interface().(pref.ProtoMessage) + m, ok := reflect.Zero(mt).Interface().(protoreflect.ProtoMessage) if !ok { return nil } @@ -97,7 +96,7 @@ func (mi *MessageInfo) initOnce() { // getPointer returns the pointer for a message, which should be of // the type of the MessageInfo. If the message is of a different type, // it returns ok==false. 
-func (mi *MessageInfo) getPointer(m pref.Message) (p pointer, ok bool) { +func (mi *MessageInfo) getPointer(m protoreflect.Message) (p pointer, ok bool) { switch m := m.(type) { case *messageState: return m.pointer(), m.messageInfo() == mi @@ -134,10 +133,10 @@ type structInfo struct { extensionOffset offset extensionType reflect.Type - fieldsByNumber map[pref.FieldNumber]reflect.StructField - oneofsByName map[pref.Name]reflect.StructField - oneofWrappersByType map[reflect.Type]pref.FieldNumber - oneofWrappersByNumber map[pref.FieldNumber]reflect.Type + fieldsByNumber map[protoreflect.FieldNumber]reflect.StructField + oneofsByName map[protoreflect.Name]reflect.StructField + oneofWrappersByType map[reflect.Type]protoreflect.FieldNumber + oneofWrappersByNumber map[protoreflect.FieldNumber]reflect.Type } func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { @@ -147,10 +146,10 @@ func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { unknownOffset: invalidOffset, extensionOffset: invalidOffset, - fieldsByNumber: map[pref.FieldNumber]reflect.StructField{}, - oneofsByName: map[pref.Name]reflect.StructField{}, - oneofWrappersByType: map[reflect.Type]pref.FieldNumber{}, - oneofWrappersByNumber: map[pref.FieldNumber]reflect.Type{}, + fieldsByNumber: map[protoreflect.FieldNumber]reflect.StructField{}, + oneofsByName: map[protoreflect.Name]reflect.StructField{}, + oneofWrappersByType: map[reflect.Type]protoreflect.FieldNumber{}, + oneofWrappersByNumber: map[protoreflect.FieldNumber]reflect.Type{}, } fieldLoop: @@ -180,12 +179,12 @@ fieldLoop: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { if len(s) > 0 && strings.Trim(s, "0123456789") == "" { n, _ := strconv.ParseUint(s, 10, 64) - si.fieldsByNumber[pref.FieldNumber(n)] = f + si.fieldsByNumber[protoreflect.FieldNumber(n)] = f continue fieldLoop } } if s := f.Tag.Get("protobuf_oneof"); len(s) > 0 { - si.oneofsByName[pref.Name(s)] = f + si.oneofsByName[protoreflect.Name(s)] = f continue fieldLoop } } @@ -208,8 +207,8 @@ fieldLoop: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { if len(s) > 0 && strings.Trim(s, "0123456789") == "" { n, _ := strconv.ParseUint(s, 10, 64) - si.oneofWrappersByType[tf] = pref.FieldNumber(n) - si.oneofWrappersByNumber[pref.FieldNumber(n)] = tf + si.oneofWrappersByType[tf] = protoreflect.FieldNumber(n) + si.oneofWrappersByNumber[protoreflect.FieldNumber(n)] = tf break } } @@ -219,7 +218,11 @@ fieldLoop: } func (mi *MessageInfo) New() protoreflect.Message { - return mi.MessageOf(reflect.New(mi.GoReflectType.Elem()).Interface()) + m := reflect.New(mi.GoReflectType.Elem()).Interface() + if r, ok := m.(protoreflect.ProtoMessage); ok { + return r.ProtoReflect() + } + return mi.MessageOf(m) } func (mi *MessageInfo) Zero() protoreflect.Message { return mi.MessageOf(reflect.Zero(mi.GoReflectType).Interface()) @@ -237,7 +240,7 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { fd := mi.Desc.Fields().Get(i) switch { case fd.IsWeak(): - mt, _ := preg.GlobalTypes.FindMessageByName(fd.Message().FullName()) + mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()) return mt case fd.IsMap(): return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index 9488b7261..d9ea010be 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ 
b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -10,17 +10,17 @@ import ( "google.golang.org/protobuf/internal/detrand" "google.golang.org/protobuf/internal/pragma" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type reflectMessageInfo struct { - fields map[pref.FieldNumber]*fieldInfo - oneofs map[pref.Name]*oneofInfo + fields map[protoreflect.FieldNumber]*fieldInfo + oneofs map[protoreflect.Name]*oneofInfo // fieldTypes contains the zero value of an enum or message field. // For lists, it contains the element type. // For maps, it contains the entry value type. - fieldTypes map[pref.FieldNumber]interface{} + fieldTypes map[protoreflect.FieldNumber]interface{} // denseFields is a subset of fields where: // 0 < fieldDesc.Number() < len(denseFields) @@ -30,8 +30,8 @@ type reflectMessageInfo struct { // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs. rangeInfos []interface{} // either *fieldInfo or *oneofInfo - getUnknown func(pointer) pref.RawFields - setUnknown func(pointer, pref.RawFields) + getUnknown func(pointer) protoreflect.RawFields + setUnknown func(pointer, protoreflect.RawFields) extensionMap func(pointer) *extensionMap nilMessage atomicNilMessage @@ -52,7 +52,7 @@ func (mi *MessageInfo) makeReflectFuncs(t reflect.Type, si structInfo) { // This code assumes that the struct is well-formed and panics if there are // any discrepancies. func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { - mi.fields = map[pref.FieldNumber]*fieldInfo{} + mi.fields = map[protoreflect.FieldNumber]*fieldInfo{} md := mi.Desc fds := md.Fields() for i := 0; i < fds.Len(); i++ { @@ -82,7 +82,7 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { mi.fields[fd.Number()] = &fi } - mi.oneofs = map[pref.Name]*oneofInfo{} + mi.oneofs = map[protoreflect.Name]*oneofInfo{} for i := 0; i < md.Oneofs().Len(); i++ { od := md.Oneofs().Get(i) mi.oneofs[od.Name()] = makeOneofInfo(od, si, mi.Exporter) @@ -117,13 +117,13 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { switch { case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsAType: // Handle as []byte. - mi.getUnknown = func(p pointer) pref.RawFields { + mi.getUnknown = func(p pointer) protoreflect.RawFields { if p.IsNil() { return nil } return *p.Apply(mi.unknownOffset).Bytes() } - mi.setUnknown = func(p pointer, b pref.RawFields) { + mi.setUnknown = func(p pointer, b protoreflect.RawFields) { if p.IsNil() { panic("invalid SetUnknown on nil Message") } @@ -131,7 +131,7 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { } case si.unknownOffset.IsValid() && si.unknownType == unknownFieldsBType: // Handle as *[]byte. 
- mi.getUnknown = func(p pointer) pref.RawFields { + mi.getUnknown = func(p pointer) protoreflect.RawFields { if p.IsNil() { return nil } @@ -141,7 +141,7 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { } return **bp } - mi.setUnknown = func(p pointer, b pref.RawFields) { + mi.setUnknown = func(p pointer, b protoreflect.RawFields) { if p.IsNil() { panic("invalid SetUnknown on nil Message") } @@ -152,10 +152,10 @@ func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { **bp = b } default: - mi.getUnknown = func(pointer) pref.RawFields { + mi.getUnknown = func(pointer) protoreflect.RawFields { return nil } - mi.setUnknown = func(p pointer, _ pref.RawFields) { + mi.setUnknown = func(p pointer, _ protoreflect.RawFields) { if p.IsNil() { panic("invalid SetUnknown on nil Message") } @@ -224,7 +224,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { } if ft != nil { if mi.fieldTypes == nil { - mi.fieldTypes = make(map[pref.FieldNumber]interface{}) + mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{}) } mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface() } @@ -233,7 +233,7 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { type extensionMap map[int32]ExtensionField -func (m *extensionMap) Range(f func(pref.FieldDescriptor, pref.Value) bool) { +func (m *extensionMap) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { if m != nil { for _, x := range *m { xd := x.Type().TypeDescriptor() @@ -247,7 +247,7 @@ func (m *extensionMap) Range(f func(pref.FieldDescriptor, pref.Value) bool) { } } } -func (m *extensionMap) Has(xt pref.ExtensionType) (ok bool) { +func (m *extensionMap) Has(xt protoreflect.ExtensionType) (ok bool) { if m == nil { return false } @@ -266,10 +266,10 @@ func (m *extensionMap) Has(xt pref.ExtensionType) (ok bool) { } return true } -func (m *extensionMap) Clear(xt pref.ExtensionType) { +func (m *extensionMap) Clear(xt protoreflect.ExtensionType) { delete(*m, int32(xt.TypeDescriptor().Number())) } -func (m *extensionMap) Get(xt pref.ExtensionType) pref.Value { +func (m *extensionMap) Get(xt protoreflect.ExtensionType) protoreflect.Value { xd := xt.TypeDescriptor() if m != nil { if x, ok := (*m)[int32(xd.Number())]; ok { @@ -278,7 +278,7 @@ func (m *extensionMap) Get(xt pref.ExtensionType) pref.Value { } return xt.Zero() } -func (m *extensionMap) Set(xt pref.ExtensionType, v pref.Value) { +func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) { xd := xt.TypeDescriptor() isValid := true switch { @@ -302,9 +302,9 @@ func (m *extensionMap) Set(xt pref.ExtensionType, v pref.Value) { x.Set(xt, v) (*m)[int32(xd.Number())] = x } -func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { +func (m *extensionMap) Mutable(xt protoreflect.ExtensionType) protoreflect.Value { xd := xt.TypeDescriptor() - if xd.Kind() != pref.MessageKind && xd.Kind() != pref.GroupKind && !xd.IsList() && !xd.IsMap() { + if xd.Kind() != protoreflect.MessageKind && xd.Kind() != protoreflect.GroupKind && !xd.IsList() && !xd.IsMap() { panic("invalid Mutable on field with non-composite type") } if x, ok := (*m)[int32(xd.Number())]; ok { @@ -320,7 +320,6 @@ func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { // in an allocation-free way without needing to have a shadow Go type generated // for every message type. This technique only works using unsafe. 
// -// // Example generated code: // // type M struct { @@ -351,12 +350,11 @@ func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { // It has access to the message info as its first field, and a pointer to the // MessageState is identical to a pointer to the concrete message value. // -// // Requirements: -// • The type M must implement protoreflect.ProtoMessage. -// • The address of m must not be nil. -// • The address of m and the address of m.state must be equal, -// even though they are different Go types. +// - The type M must implement protoreflect.ProtoMessage. +// - The address of m must not be nil. +// - The address of m and the address of m.state must be equal, +// even though they are different Go types. type MessageState struct { pragma.NoUnkeyedLiterals pragma.DoNotCompare @@ -368,8 +366,8 @@ type MessageState struct { type messageState MessageState var ( - _ pref.Message = (*messageState)(nil) - _ unwrapper = (*messageState)(nil) + _ protoreflect.Message = (*messageState)(nil) + _ unwrapper = (*messageState)(nil) ) // messageDataType is a tuple of a pointer to the message data and @@ -387,16 +385,16 @@ type ( ) var ( - _ pref.Message = (*messageReflectWrapper)(nil) - _ unwrapper = (*messageReflectWrapper)(nil) - _ pref.ProtoMessage = (*messageIfaceWrapper)(nil) - _ unwrapper = (*messageIfaceWrapper)(nil) + _ protoreflect.Message = (*messageReflectWrapper)(nil) + _ unwrapper = (*messageReflectWrapper)(nil) + _ protoreflect.ProtoMessage = (*messageIfaceWrapper)(nil) + _ unwrapper = (*messageIfaceWrapper)(nil) ) // MessageOf returns a reflective view over a message. The input must be a // pointer to a named Go struct. If the provided type has a ProtoReflect method, // it must be implemented by calling this method. -func (mi *MessageInfo) MessageOf(m interface{}) pref.Message { +func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message { if reflect.TypeOf(m) != mi.GoReflectType { panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) } @@ -421,7 +419,7 @@ func (m *messageIfaceWrapper) Reset() { rv.Elem().Set(reflect.Zero(rv.Type().Elem())) } } -func (m *messageIfaceWrapper) ProtoReflect() pref.Message { +func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message { return (*messageReflectWrapper)(m) } func (m *messageIfaceWrapper) protoUnwrap() interface{} { @@ -430,7 +428,7 @@ func (m *messageIfaceWrapper) protoUnwrap() interface{} { // checkField verifies that the provided field descriptor is valid. // Exactly one of the returned values is populated. 
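
The MessageState layout described earlier in this file rests on one invariant: state is the first field of the generated struct, so a pointer to the message and a pointer to its state coincide. A toy demonstration of that aliasing, assuming a hypothetical message M (not generated code):

    package main

    import (
        "fmt"
        "unsafe"
    )

    type messageState struct{ info string }

    // M mimics a generated message whose first field is its state.
    type M struct {
        state messageState
        Name  string
    }

    func main() {
        m := &M{state: messageState{info: "reflective state"}, Name: "x"}
        // Because state is the first field, *M and *messageState share an address,
        // so the state can be recovered from the message pointer without allocating.
        ms := (*messageState)(unsafe.Pointer(m))
        fmt.Println(ms.info)                                        // reflective state
        fmt.Println(unsafe.Pointer(m) == unsafe.Pointer(&m.state))  // true
    }
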
-func (mi *MessageInfo) checkField(fd pref.FieldDescriptor) (*fieldInfo, pref.ExtensionType) { +func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionType) { var fi *fieldInfo if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) { fi = mi.denseFields[n] @@ -455,7 +453,7 @@ func (mi *MessageInfo) checkField(fd pref.FieldDescriptor) (*fieldInfo, pref.Ext if !mi.Desc.ExtensionRanges().Has(fd.Number()) { panic(fmt.Sprintf("extension %v extends %v outside the extension range", fd.FullName(), mi.Desc.FullName())) } - xtd, ok := fd.(pref.ExtensionTypeDescriptor) + xtd, ok := fd.(protoreflect.ExtensionTypeDescriptor) if !ok { panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName())) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 343cf8721..5e736c60e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -11,24 +11,24 @@ import ( "sync" "google.golang.org/protobuf/internal/flags" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" ) type fieldInfo struct { - fieldDesc pref.FieldDescriptor + fieldDesc protoreflect.FieldDescriptor // These fields are used for protobuf reflection support. has func(pointer) bool clear func(pointer) - get func(pointer) pref.Value - set func(pointer, pref.Value) - mutable func(pointer) pref.Value - newMessage func() pref.Message - newField func() pref.Value + get func(pointer) protoreflect.Value + set func(pointer, protoreflect.Value) + mutable func(pointer) protoreflect.Value + newMessage func() protoreflect.Message + newField func() protoreflect.Value } -func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { +func fieldInfoForMissing(fd protoreflect.FieldDescriptor) fieldInfo { // This never occurs for generated message types. // It implies that a hand-crafted type has missing Go fields // for specific protobuf message fields. 
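
Each fieldInfo above bundles has/clear/get/set/mutable closures that are built once per field and capture that field's location in the Go struct. The shape of the pattern, reduced to plain reflect over interface{} values (accessorsFor and M are illustrative; the vendored code operates on unsafe pointer offsets and protoreflect.Value instead):

    package main

    import (
        "fmt"
        "reflect"
    )

    type accessors struct {
        get func(msg interface{}) interface{}
        set func(msg, v interface{})
    }

    // accessorsFor resolves the struct field once, then returns closures that
    // capture its index so later calls need no name lookup.
    func accessorsFor(t reflect.Type, field string) accessors {
        sf, ok := t.Elem().FieldByName(field)
        if !ok {
            panic("missing Go struct field " + field)
        }
        idx := sf.Index
        return accessors{
            get: func(msg interface{}) interface{} {
                return reflect.ValueOf(msg).Elem().FieldByIndex(idx).Interface()
            },
            set: func(msg, v interface{}) {
                reflect.ValueOf(msg).Elem().FieldByIndex(idx).Set(reflect.ValueOf(v))
            },
        }
    }

    type M struct{ Name string }

    func main() {
        acc := accessorsFor(reflect.TypeOf((*M)(nil)), "Name")
        m := &M{}
        acc.set(m, "hello")
        fmt.Println(acc.get(m)) // hello
    }
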
@@ -40,19 +40,19 @@ func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { clear: func(p pointer) { panic("missing Go struct field for " + string(fd.FullName())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { return fd.Default() }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { panic("missing Go struct field for " + string(fd.FullName())) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { panic("missing Go struct field for " + string(fd.FullName())) }, - newMessage: func() pref.Message { + newMessage: func() protoreflect.Message { panic("missing Go struct field for " + string(fd.FullName())) }, - newField: func() pref.Value { + newField: func() protoreflect.Value { if v := fd.Default(); v.IsValid() { return v } @@ -61,7 +61,7 @@ func fieldInfoForMissing(fd pref.FieldDescriptor) fieldInfo { } } -func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo { +func fieldInfoForOneof(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo { ft := fs.Type if ft.Kind() != reflect.Interface { panic(fmt.Sprintf("field %v has invalid type: got %v, want interface kind", fd.FullName(), ft)) @@ -102,7 +102,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export } rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } @@ -113,7 +113,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export rv = rv.Elem().Elem().Field(0) return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { rv.Set(reflect.New(ot)) @@ -121,7 +121,7 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export rv = rv.Elem().Elem().Field(0) rv.Set(conv.GoValueOf(v)) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { if !isMessage { panic(fmt.Sprintf("field %v with invalid Mutable call on field with non-composite type", fd.FullName())) } @@ -131,20 +131,20 @@ func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x export } rv = rv.Elem().Elem().Field(0) if rv.Kind() == reflect.Ptr && rv.IsNil() { - rv.Set(conv.GoValueOf(pref.ValueOfMessage(conv.New().Message()))) + rv.Set(conv.GoValueOf(protoreflect.ValueOfMessage(conv.New().Message()))) } return conv.PBValueOf(rv) }, - newMessage: func() pref.Message { + newMessage: func() protoreflect.Message { return conv.New().Message() }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } } -func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { +func fieldInfoForMap(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type if ft.Kind() != reflect.Map { panic(fmt.Sprintf("field %v has invalid type: got %v, want map kind", fd.FullName(), ft)) @@ -166,7 +166,7 @@ func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } @@ -176,7 +176,7 @@ func 
fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter } return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() pv := conv.GoValueOf(v) if pv.IsNil() { @@ -184,20 +184,20 @@ func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter } rv.Set(pv) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if v.IsNil() { v.Set(reflect.MakeMap(fs.Type)) } return conv.PBValueOf(v) }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } } -func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { +func fieldInfoForList(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type if ft.Kind() != reflect.Slice { panic(fmt.Sprintf("field %v has invalid type: got %v, want slice kind", fd.FullName(), ft)) @@ -219,7 +219,7 @@ func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporte rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } @@ -229,7 +229,7 @@ func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporte } return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() pv := conv.GoValueOf(v) if pv.IsNil() { @@ -237,11 +237,11 @@ func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporte } rv.Set(pv.Elem()) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { v := p.Apply(fieldOffset).AsValueOf(fs.Type) return conv.PBValueOf(v) }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } @@ -252,7 +252,7 @@ var ( emptyBytes = reflect.ValueOf([]byte{}) ) -func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { +func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type nullable := fd.HasPresence() isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 @@ -300,7 +300,7 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } @@ -315,7 +315,7 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor } return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if nullable && rv.Kind() == reflect.Ptr { if rv.IsNil() { @@ -332,23 +332,23 @@ func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x expor } } }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } } -func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldInfo { +func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo { if !flags.ProtoLegacy { panic("no support for proto1 weak fields") } var once sync.Once - 
var messageType pref.MessageType + var messageType protoreflect.MessageType lazyInit := func() { once.Do(func() { messageName := fd.Message().FullName() - messageType, _ = preg.GlobalTypes.FindMessageByName(messageName) + messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) if messageType == nil { panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName())) } @@ -368,18 +368,18 @@ func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldIn clear: func(p pointer) { p.Apply(weakOffset).WeakFields().clear(num) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { lazyInit() if p.IsNil() { - return pref.ValueOfMessage(messageType.Zero()) + return protoreflect.ValueOfMessage(messageType.Zero()) } m, ok := p.Apply(weakOffset).WeakFields().get(num) if !ok { - return pref.ValueOfMessage(messageType.Zero()) + return protoreflect.ValueOfMessage(messageType.Zero()) } - return pref.ValueOfMessage(m.ProtoReflect()) + return protoreflect.ValueOfMessage(m.ProtoReflect()) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { lazyInit() m := v.Message() if m.Descriptor() != messageType.Descriptor() { @@ -390,7 +390,7 @@ func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldIn } p.Apply(weakOffset).WeakFields().set(num, m.Interface()) }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { lazyInit() fs := p.Apply(weakOffset).WeakFields() m, ok := fs.get(num) @@ -398,20 +398,20 @@ func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldIn m = messageType.New().Interface() fs.set(num, m) } - return pref.ValueOfMessage(m.ProtoReflect()) + return protoreflect.ValueOfMessage(m.ProtoReflect()) }, - newMessage: func() pref.Message { + newMessage: func() protoreflect.Message { lazyInit() return messageType.New() }, - newField: func() pref.Value { + newField: func() protoreflect.Value { lazyInit() - return pref.ValueOfMessage(messageType.New()) + return protoreflect.ValueOfMessage(messageType.New()) }, } } -func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { +func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type conv := NewConverter(ft, fd) @@ -433,47 +433,47 @@ func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x expo rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(reflect.Zero(rv.Type())) }, - get: func(p pointer) pref.Value { + get: func(p pointer) protoreflect.Value { if p.IsNil() { return conv.Zero() } rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() return conv.PBValueOf(rv) }, - set: func(p pointer, v pref.Value) { + set: func(p pointer, v protoreflect.Value) { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() rv.Set(conv.GoValueOf(v)) if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { panic(fmt.Sprintf("field %v has invalid nil pointer", fd.FullName())) } }, - mutable: func(p pointer) pref.Value { + mutable: func(p pointer) protoreflect.Value { rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if fs.Type.Kind() == reflect.Ptr && rv.IsNil() { rv.Set(conv.GoValueOf(conv.New())) } return conv.PBValueOf(rv) }, - newMessage: func() pref.Message { + newMessage: func() protoreflect.Message { return conv.New().Message() }, - newField: func() pref.Value { + newField: func() protoreflect.Value { return conv.New() }, } } type oneofInfo struct { - 
oneofDesc pref.OneofDescriptor - which func(pointer) pref.FieldNumber + oneofDesc protoreflect.OneofDescriptor + which func(pointer) protoreflect.FieldNumber } -func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInfo { +func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo { oi := &oneofInfo{oneofDesc: od} if od.IsSynthetic() { fs := si.fieldsByNumber[od.Fields().Get(0).Number()] fieldOffset := offsetOf(fs, x) - oi.which = func(p pointer) pref.FieldNumber { + oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 } @@ -486,7 +486,7 @@ func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInf } else { fs := si.oneofsByName[od.Name()] fieldOffset := offsetOf(fs, x) - oi.which = func(p pointer) pref.FieldNumber { + oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 9e3ed821e..4c491bdf4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego || appengine // +build purego appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 9ecf23a85..ee0e0573e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go index 08cfb6054..a24e6bbd7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/validate.go +++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -16,9 +16,9 @@ import ( "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/strs" - pref "google.golang.org/protobuf/reflect/protoreflect" - preg "google.golang.org/protobuf/reflect/protoregistry" - piface "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" ) // ValidationStatus is the result of validating the wire-format encoding of a message. @@ -56,20 +56,20 @@ func (v ValidationStatus) String() string { // of the message type. // // This function is exposed for testing. 
-func Validate(mt pref.MessageType, in piface.UnmarshalInput) (out piface.UnmarshalOutput, _ ValidationStatus) { +func Validate(mt protoreflect.MessageType, in protoiface.UnmarshalInput) (out protoiface.UnmarshalOutput, _ ValidationStatus) { mi, ok := mt.(*MessageInfo) if !ok { return out, ValidationUnknown } if in.Resolver == nil { - in.Resolver = preg.GlobalTypes + in.Resolver = protoregistry.GlobalTypes } o, st := mi.validate(in.Buf, 0, unmarshalOptions{ flags: in.Flags, resolver: in.Resolver, }) if o.initialized { - out.Flags |= piface.UnmarshalInitialized + out.Flags |= protoiface.UnmarshalInitialized } return out, st } @@ -106,22 +106,22 @@ const ( validationTypeMessageSetItem ) -func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescriptor, ft reflect.Type) validationInfo { +func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd protoreflect.FieldDescriptor, ft reflect.Type) validationInfo { var vi validationInfo switch { case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): switch fd.Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: vi.typ = validationTypeMessage if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { vi.mi = getMessageInfo(ot.Field(0).Type) } - case pref.GroupKind: + case protoreflect.GroupKind: vi.typ = validationTypeGroup if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { vi.mi = getMessageInfo(ot.Field(0).Type) } - case pref.StringKind: + case protoreflect.StringKind: if strs.EnforceUTF8(fd) { vi.typ = validationTypeUTF8String } @@ -129,7 +129,7 @@ func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescrip default: vi = newValidationInfo(fd, ft) } - if fd.Cardinality() == pref.Required { + if fd.Cardinality() == protoreflect.Required { // Avoid overflow. 
The required field check is done with a 64-bit mask, with // any message containing more than 64 required fields always reported as // potentially uninitialized, so it is not important to get a precise count @@ -142,22 +142,22 @@ func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescrip return vi } -func newValidationInfo(fd pref.FieldDescriptor, ft reflect.Type) validationInfo { +func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validationInfo { var vi validationInfo switch { case fd.IsList(): switch fd.Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: vi.typ = validationTypeMessage if ft.Kind() == reflect.Slice { vi.mi = getMessageInfo(ft.Elem()) } - case pref.GroupKind: + case protoreflect.GroupKind: vi.typ = validationTypeGroup if ft.Kind() == reflect.Slice { vi.mi = getMessageInfo(ft.Elem()) } - case pref.StringKind: + case protoreflect.StringKind: vi.typ = validationTypeBytes if strs.EnforceUTF8(fd) { vi.typ = validationTypeUTF8String @@ -175,33 +175,33 @@ func newValidationInfo(fd pref.FieldDescriptor, ft reflect.Type) validationInfo case fd.IsMap(): vi.typ = validationTypeMap switch fd.MapKey().Kind() { - case pref.StringKind: + case protoreflect.StringKind: if strs.EnforceUTF8(fd) { vi.keyType = validationTypeUTF8String } } switch fd.MapValue().Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: vi.valType = validationTypeMessage if ft.Kind() == reflect.Map { vi.mi = getMessageInfo(ft.Elem()) } - case pref.StringKind: + case protoreflect.StringKind: if strs.EnforceUTF8(fd) { vi.valType = validationTypeUTF8String } } default: switch fd.Kind() { - case pref.MessageKind: + case protoreflect.MessageKind: vi.typ = validationTypeMessage if !fd.IsWeak() { vi.mi = getMessageInfo(ft) } - case pref.GroupKind: + case protoreflect.GroupKind: vi.typ = validationTypeGroup vi.mi = getMessageInfo(ft) - case pref.StringKind: + case protoreflect.StringKind: vi.typ = validationTypeBytes if strs.EnforceUTF8(fd) { vi.typ = validationTypeUTF8String @@ -314,11 +314,11 @@ State: break } messageName := fd.Message().FullName() - messageType, err := preg.GlobalTypes.FindMessageByName(messageName) + messageType, err := protoregistry.GlobalTypes.FindMessageByName(messageName) switch err { case nil: vi.mi, _ = messageType.(*MessageInfo) - case preg.NotFound: + case protoregistry.NotFound: vi.typ = validationTypeBytes default: return out, ValidationUnknown @@ -335,7 +335,7 @@ State: // unmarshaling to begin failing. Supporting this requires some way to // determine if the resolver is frozen. 
xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), num) - if err != nil && err != preg.NotFound { + if err != nil && err != protoregistry.NotFound { return out, ValidationUnknown } if err == nil { @@ -513,7 +513,7 @@ State: } xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), typeid) switch { - case err == preg.NotFound: + case err == protoregistry.NotFound: b = b[n:] case err != nil: return out, ValidationUnknown diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go index 009cbefd1..eb79a7ba9 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/weak.go +++ b/vendor/google.golang.org/protobuf/internal/impl/weak.go @@ -7,7 +7,7 @@ package impl import ( "fmt" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/reflect/protoregistry" ) @@ -17,32 +17,32 @@ import ( // defined directly on it. type weakFields WeakFields -func (w weakFields) get(num pref.FieldNumber) (pref.ProtoMessage, bool) { +func (w weakFields) get(num protoreflect.FieldNumber) (protoreflect.ProtoMessage, bool) { m, ok := w[int32(num)] return m, ok } -func (w *weakFields) set(num pref.FieldNumber, m pref.ProtoMessage) { +func (w *weakFields) set(num protoreflect.FieldNumber, m protoreflect.ProtoMessage) { if *w == nil { *w = make(weakFields) } (*w)[int32(num)] = m } -func (w *weakFields) clear(num pref.FieldNumber) { +func (w *weakFields) clear(num protoreflect.FieldNumber) { delete(*w, int32(num)) } -func (Export) HasWeak(w WeakFields, num pref.FieldNumber) bool { +func (Export) HasWeak(w WeakFields, num protoreflect.FieldNumber) bool { _, ok := w[int32(num)] return ok } -func (Export) ClearWeak(w *WeakFields, num pref.FieldNumber) { +func (Export) ClearWeak(w *WeakFields, num protoreflect.FieldNumber) { delete(*w, int32(num)) } -func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pref.ProtoMessage { +func (Export) GetWeak(w WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName) protoreflect.ProtoMessage { if m, ok := w[int32(num)]; ok { return m } @@ -53,7 +53,7 @@ func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pr return mt.Zero().Interface() } -func (Export) SetWeak(w *WeakFields, num pref.FieldNumber, name pref.FullName, m pref.ProtoMessage) { +func (Export) SetWeak(w *WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName, m protoreflect.ProtoMessage) { if m != nil { mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) if mt == nil { diff --git a/vendor/google.golang.org/protobuf/internal/order/order.go b/vendor/google.golang.org/protobuf/internal/order/order.go index 2a24953f6..33745ed06 100644 --- a/vendor/google.golang.org/protobuf/internal/order/order.go +++ b/vendor/google.golang.org/protobuf/internal/order/order.go @@ -5,12 +5,12 @@ package order import ( - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // FieldOrder specifies the ordering to visit message fields. // It is a function that reports whether x is ordered before y. -type FieldOrder func(x, y pref.FieldDescriptor) bool +type FieldOrder func(x, y protoreflect.FieldDescriptor) bool var ( // AnyFieldOrder specifies no specific field ordering. @@ -18,9 +18,9 @@ var ( // LegacyFieldOrder sorts fields in the same ordering as emitted by // wire serialization in the github.com/golang/protobuf implementation. 
- LegacyFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + LegacyFieldOrder FieldOrder = func(x, y protoreflect.FieldDescriptor) bool { ox, oy := x.ContainingOneof(), y.ContainingOneof() - inOneof := func(od pref.OneofDescriptor) bool { + inOneof := func(od protoreflect.OneofDescriptor) bool { return od != nil && !od.IsSynthetic() } @@ -41,14 +41,14 @@ var ( } // NumberFieldOrder sorts fields by their field number. - NumberFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + NumberFieldOrder FieldOrder = func(x, y protoreflect.FieldDescriptor) bool { return x.Number() < y.Number() } // IndexNameFieldOrder sorts non-extension fields before extension fields. // Non-extensions are sorted according to their declaration index. // Extensions are sorted according to their full name. - IndexNameFieldOrder FieldOrder = func(x, y pref.FieldDescriptor) bool { + IndexNameFieldOrder FieldOrder = func(x, y protoreflect.FieldDescriptor) bool { // Non-extension fields sort before extension fields. if x.IsExtension() != y.IsExtension() { return !x.IsExtension() && y.IsExtension() @@ -64,7 +64,7 @@ var ( // KeyOrder specifies the ordering to visit map entries. // It is a function that reports whether x is ordered before y. -type KeyOrder func(x, y pref.MapKey) bool +type KeyOrder func(x, y protoreflect.MapKey) bool var ( // AnyKeyOrder specifies no specific key ordering. @@ -72,7 +72,7 @@ var ( // GenericKeyOrder sorts false before true, numeric keys in ascending order, // and strings in lexicographical ordering according to UTF-8 codepoints. - GenericKeyOrder KeyOrder = func(x, y pref.MapKey) bool { + GenericKeyOrder KeyOrder = func(x, y protoreflect.MapKey) bool { switch x.Interface().(type) { case bool: return !x.Bool() && y.Bool() diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go index c8090e0c5..1665a68e5 100644 --- a/vendor/google.golang.org/protobuf/internal/order/range.go +++ b/vendor/google.golang.org/protobuf/internal/order/range.go @@ -9,12 +9,12 @@ import ( "sort" "sync" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type messageField struct { - fd pref.FieldDescriptor - v pref.Value + fd protoreflect.FieldDescriptor + v protoreflect.Value } var messageFieldPool = sync.Pool{ @@ -25,8 +25,8 @@ type ( // FieldRanger is an interface for visiting all fields in a message. // The protoreflect.Message type implements this interface. FieldRanger interface{ Range(VisitField) } - // VisitField is called everytime a message field is visited. - VisitField = func(pref.FieldDescriptor, pref.Value) bool + // VisitField is called every time a message field is visited. + VisitField = func(protoreflect.FieldDescriptor, protoreflect.Value) bool ) // RangeFields iterates over the fields of fs according to the specified order. @@ -47,7 +47,7 @@ func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) { }() // Collect all fields in the message and sort them.
- fs.Range(func(fd pref.FieldDescriptor, v pref.Value) bool { + fs.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { fields = append(fields, messageField{fd, v}) return true }) @@ -64,8 +64,8 @@ func RangeFields(fs FieldRanger, less FieldOrder, fn VisitField) { } type mapEntry struct { - k pref.MapKey - v pref.Value + k protoreflect.MapKey + v protoreflect.Value } var mapEntryPool = sync.Pool{ @@ -76,8 +76,8 @@ type ( // EntryRanger is an interface for visiting all fields in a message. // The protoreflect.Map type implements this interface. EntryRanger interface{ Range(VisitEntry) } - // VisitEntry is called everytime a map entry is visited. - VisitEntry = func(pref.MapKey, pref.Value) bool + // VisitEntry is called every time a map entry is visited. + VisitEntry = func(protoreflect.MapKey, protoreflect.Value) bool ) // RangeEntries iterates over the entries of es according to the specified order. @@ -98,7 +98,7 @@ func RangeEntries(es EntryRanger, less KeyOrder, fn VisitEntry) { }() // Collect all entries in the map and sort them. - es.Range(func(k pref.MapKey, v pref.Value) bool { + es.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { entries = append(entries, mapEntry{k, v}) return true }) diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go index 85e074c97..a1f6f3338 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego || appengine // +build purego appengine package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go index 2160c7019..fea589c45 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package strs @@ -9,7 +10,7 @@ package strs import ( "unsafe" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) type ( @@ -58,7 +59,7 @@ type Builder struct { // AppendFullName is equivalent to protoreflect.FullName.Append, // but optimized for large batches where each name has a shared lifetime. -func (sb *Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { +func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { n := len(prefix) + len(".") + len(name) if len(prefix) == 0 { n -= len(".") @@ -67,7 +68,7 @@ func (sb *Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.Ful sb.buf = append(sb.buf, prefix...) sb.buf = append(sb.buf, '.') sb.buf = append(sb.buf, name...) 
- return pref.FullName(sb.last(n)) + return protoreflect.FullName(sb.last(n)) } // MakeString is equivalent to string(b), but optimized for large batches diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 14e774fb2..b480c5010 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -12,47 +12,46 @@ import ( // These constants determine the current version of this module. // -// // For our release process, we enforce the following rules: -// * Tagged releases use a tag that is identical to String. -// * Tagged releases never reference a commit where the String -// contains "devel". -// * The set of all commits in this repository where String -// does not contain "devel" must have a unique String. -// +// - Tagged releases use a tag that is identical to String. +// - Tagged releases never reference a commit where the String +// contains "devel". +// - The set of all commits in this repository where String +// does not contain "devel" must have a unique String. // // Steps for tagging a new release: -// 1. Create a new CL. // -// 2. Update Minor, Patch, and/or PreRelease as necessary. -// PreRelease must not contain the string "devel". +// 1. Create a new CL. // -// 3. Since the last released minor version, have there been any changes to -// generator that relies on new functionality in the runtime? -// If yes, then increment RequiredGenerated. +// 2. Update Minor, Patch, and/or PreRelease as necessary. +// PreRelease must not contain the string "devel". // -// 4. Since the last released minor version, have there been any changes to -// the runtime that removes support for old .pb.go source code? -// If yes, then increment SupportMinimum. +// 3. Since the last released minor version, have there been any changes to +// generator that relies on new functionality in the runtime? +// If yes, then increment RequiredGenerated. // -// 5. Send out the CL for review and submit it. -// Note that the next CL in step 8 must be submitted after this CL -// without any other CLs in-between. +// 4. Since the last released minor version, have there been any changes to +// the runtime that removes support for old .pb.go source code? +// If yes, then increment SupportMinimum. // -// 6. Tag a new version, where the tag is is the current String. +// 5. Send out the CL for review and submit it. +// Note that the next CL in step 8 must be submitted after this CL +// without any other CLs in-between. // -// 7. Write release notes for all notable changes -// between this release and the last release. +// 6. Tag a new version, where the tag is is the current String. // -// 8. Create a new CL. +// 7. Write release notes for all notable changes +// between this release and the last release. // -// 9. Update PreRelease to include the string "devel". -// For example: "" -> "devel" or "rc.1" -> "rc.1.devel" +// 8. Create a new CL. // -// 10. Send out the CL for review and submit it. +// 9. Update PreRelease to include the string "devel". +// For example: "" -> "devel" or "rc.1" -> "rc.1.devel" +// +// 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 27 + Minor = 28 Patch = 1 PreRelease = "" ) @@ -60,6 +59,7 @@ const ( // String formats the version string for this module in semver format. 
// // Examples: +// // v1.20.1 // v1.21.0-rc.1 func String() string { diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index 49f9b8c88..48d47946b 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -19,7 +19,8 @@ import ( // UnmarshalOptions configures the unmarshaler. // // Example usage: -// err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m) +// +// err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m) type UnmarshalOptions struct { pragma.NoUnkeyedLiterals @@ -42,18 +43,25 @@ type UnmarshalOptions struct { FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) } + + // RecursionLimit limits how deeply messages may be nested. + // If zero, a default limit is applied. + RecursionLimit int } // Unmarshal parses the wire-format message in b and places the result in m. // The provided message must be mutable (e.g., a non-nil pointer to a message). func Unmarshal(b []byte, m Message) error { - _, err := UnmarshalOptions{}.unmarshal(b, m.ProtoReflect()) + _, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect()) return err } // Unmarshal parses the wire-format message in b and places the result in m. // The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } _, err := o.unmarshal(b, m.ProtoReflect()) return err } @@ -63,6 +71,9 @@ func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { // This method permits fine-grained control over the unmarshaler. // Most users should use Unmarshal instead. 
func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } return o.unmarshal(in.Buf, in.Message) } @@ -86,12 +97,17 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto Message: m, Buf: b, Resolver: o.Resolver, + Depth: o.RecursionLimit, } if o.DiscardUnknown { in.Flags |= protoiface.UnmarshalDiscardUnknown } out, err = methods.Unmarshal(in) } else { + o.RecursionLimit-- + if o.RecursionLimit < 0 { + return out, errors.New("exceeded max recursion depth") + } err = o.unmarshalMessageSlow(b, m) } if err != nil { diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go index c52d8c4ab..08d2a46f5 100644 --- a/vendor/google.golang.org/protobuf/proto/doc.go +++ b/vendor/google.golang.org/protobuf/proto/doc.go @@ -6,18 +6,17 @@ // // For documentation on protocol buffers in general, see: // -// https://developers.google.com/protocol-buffers +// https://developers.google.com/protocol-buffers // // For a tutorial on using protocol buffers with Go, see: // -// https://developers.google.com/protocol-buffers/docs/gotutorial +// https://developers.google.com/protocol-buffers/docs/gotutorial // // For a guide to generated Go protocol buffer code, see: // -// https://developers.google.com/protocol-buffers/docs/reference/go-generated +// https://developers.google.com/protocol-buffers/docs/reference/go-generated // -// -// Binary serialization +// # Binary serialization // // This package contains functions to convert to and from the wire format, // an efficient binary serialization of protocol buffers. @@ -30,8 +29,7 @@ // • Unmarshal converts a message from the wire format. // The UnmarshalOptions type provides more control over wire unmarshaling. // -// -// Basic message operations +// # Basic message operations // // • Clone makes a deep copy of a message. // @@ -45,8 +43,7 @@ // // • CheckInitialized reports whether all required fields in a message are set. // -// -// Optional scalar constructors +// # Optional scalar constructors // // The API for some generated messages represents optional scalar fields // as pointers to a value. For example, an optional string field has the @@ -61,16 +58,14 @@ // // Optional scalar fields are only supported in proto2. // -// -// Extension accessors +// # Extension accessors // // • HasExtension, GetExtension, SetExtension, and ClearExtension // access extension field values in a protocol buffer message. // // Extension fields are only supported in proto2. // -// -// Related packages +// # Related packages // // • Package "google.golang.org/protobuf/encoding/protojson" converts messages to // and from JSON. diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go index d18239c23..bf7f816d0 100644 --- a/vendor/google.golang.org/protobuf/proto/encode.go +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -16,7 +16,8 @@ import ( // MarshalOptions configures the marshaler. // // Example usage: -// b, err := MarshalOptions{Deterministic: true}.Marshal(m) +// +// b, err := MarshalOptions{Deterministic: true}.Marshal(m) type MarshalOptions struct { pragma.NoUnkeyedLiterals @@ -101,7 +102,9 @@ func (o MarshalOptions) Marshal(m Message) ([]byte, error) { // otherwise it returns a non-nil empty buffer. 
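The decode.go hunk above introduces the RecursionLimit option and makes plain proto.Unmarshal fall back to protowire.DefaultRecursionLimit when the option is zero. A minimal usage sketch (durationpb is only a convenient generated message here; any proto.Message works):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        b, err := proto.Marshal(durationpb.New(0))
        if err != nil {
            panic(err)
        }

        // Explicit limit; a zero value would make Unmarshal apply
        // protowire.DefaultRecursionLimit instead.
        opts := proto.UnmarshalOptions{RecursionLimit: 16}
        var d durationpb.Duration
        if err := opts.Unmarshal(b, &d); err != nil {
            // Deeply nested input fails with "exceeded max recursion depth".
            fmt.Println("unmarshal:", err)
        }
    }

Capping the nesting depth bounds stack usage when unmarshaling adversarial, deeply nested inputs.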
// // This is to assist the edge-case where user-code does the following: +// // m1.OptionalBytes, _ = proto.Marshal(m2) +// // where they expect the proto2 "optional_bytes" field to be populated // if any only if m2 is a valid message. func emptyBytesForMessage(m Message) []byte { diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 4dba2b969..67948dd1d 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -10,7 +10,7 @@ import ( "reflect" "google.golang.org/protobuf/encoding/protowire" - pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoreflect" ) // Equal reports whether two messages are equal. @@ -33,6 +33,10 @@ func Equal(x, y Message) bool { if x == nil || y == nil { return x == nil && y == nil } + if reflect.TypeOf(x).Kind() == reflect.Ptr && x == y { + // Avoid an expensive comparison if both inputs are identical pointers. + return true + } mx := x.ProtoReflect() my := y.ProtoReflect() if mx.IsValid() != my.IsValid() { @@ -42,14 +46,14 @@ func Equal(x, y Message) bool { } // equalMessage compares two messages. -func equalMessage(mx, my pref.Message) bool { +func equalMessage(mx, my protoreflect.Message) bool { if mx.Descriptor() != my.Descriptor() { return false } nx := 0 equal := true - mx.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool { + mx.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool { nx++ vy := my.Get(fd) equal = my.Has(fd) && equalField(fd, vx, vy) @@ -59,7 +63,7 @@ func equalMessage(mx, my pref.Message) bool { return false } ny := 0 - my.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool { + my.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool { ny++ return true }) @@ -71,7 +75,7 @@ func equalMessage(mx, my pref.Message) bool { } // equalField compares two fields. -func equalField(fd pref.FieldDescriptor, x, y pref.Value) bool { +func equalField(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool { switch { case fd.IsList(): return equalList(fd, x.List(), y.List()) @@ -83,12 +87,12 @@ func equalField(fd pref.FieldDescriptor, x, y pref.Value) bool { } // equalMap compares two maps. -func equalMap(fd pref.FieldDescriptor, x, y pref.Map) bool { +func equalMap(fd protoreflect.FieldDescriptor, x, y protoreflect.Map) bool { if x.Len() != y.Len() { return false } equal := true - x.Range(func(k pref.MapKey, vx pref.Value) bool { + x.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool { vy := y.Get(k) equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy) return equal @@ -97,7 +101,7 @@ func equalMap(fd pref.FieldDescriptor, x, y pref.Map) bool { } // equalList compares two lists. -func equalList(fd pref.FieldDescriptor, x, y pref.List) bool { +func equalList(fd protoreflect.FieldDescriptor, x, y protoreflect.List) bool { if x.Len() != y.Len() { return false } @@ -110,31 +114,31 @@ func equalList(fd pref.FieldDescriptor, x, y pref.List) bool { } // equalValue compares two singular values. 
-func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool { +func equalValue(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool { switch fd.Kind() { - case pref.BoolKind: + case protoreflect.BoolKind: return x.Bool() == y.Bool() - case pref.EnumKind: + case protoreflect.EnumKind: return x.Enum() == y.Enum() - case pref.Int32Kind, pref.Sint32Kind, - pref.Int64Kind, pref.Sint64Kind, - pref.Sfixed32Kind, pref.Sfixed64Kind: + case protoreflect.Int32Kind, protoreflect.Sint32Kind, + protoreflect.Int64Kind, protoreflect.Sint64Kind, + protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind: return x.Int() == y.Int() - case pref.Uint32Kind, pref.Uint64Kind, - pref.Fixed32Kind, pref.Fixed64Kind: + case protoreflect.Uint32Kind, protoreflect.Uint64Kind, + protoreflect.Fixed32Kind, protoreflect.Fixed64Kind: return x.Uint() == y.Uint() - case pref.FloatKind, pref.DoubleKind: + case protoreflect.FloatKind, protoreflect.DoubleKind: fx := x.Float() fy := y.Float() if math.IsNaN(fx) || math.IsNaN(fy) { return math.IsNaN(fx) && math.IsNaN(fy) } return fx == fy - case pref.StringKind: + case protoreflect.StringKind: return x.String() == y.String() - case pref.BytesKind: + case protoreflect.BytesKind: return bytes.Equal(x.Bytes(), y.Bytes()) - case pref.MessageKind, pref.GroupKind: + case protoreflect.MessageKind, protoreflect.GroupKind: return equalMessage(x.Message(), y.Message()) default: return x.Interface() == y.Interface() @@ -143,7 +147,7 @@ func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool { // equalUnknown compares unknown fields by direct comparison on the raw bytes // of each individual field number. -func equalUnknown(x, y pref.RawFields) bool { +func equalUnknown(x, y protoreflect.RawFields) bool { if len(x) != len(y) { return false } @@ -151,8 +155,8 @@ func equalUnknown(x, y pref.RawFields) bool { return true } - mx := make(map[pref.FieldNumber]pref.RawFields) - my := make(map[pref.FieldNumber]pref.RawFields) + mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + my := make(map[protoreflect.FieldNumber]protoreflect.RawFields) for len(x) > 0 { fnum, _, n := protowire.ConsumeField(x) mx[fnum] = append(mx[fnum], x[:n]...) diff --git a/vendor/google.golang.org/protobuf/proto/proto_methods.go b/vendor/google.golang.org/protobuf/proto/proto_methods.go index d8dd604f6..465e057b3 100644 --- a/vendor/google.golang.org/protobuf/proto/proto_methods.go +++ b/vendor/google.golang.org/protobuf/proto/proto_methods.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // The protoreflect build tag disables use of fast-path methods. +//go:build !protoreflect // +build !protoreflect package proto diff --git a/vendor/google.golang.org/protobuf/proto/proto_reflect.go b/vendor/google.golang.org/protobuf/proto/proto_reflect.go index b103d4320..494d6ceef 100644 --- a/vendor/google.golang.org/protobuf/proto/proto_reflect.go +++ b/vendor/google.golang.org/protobuf/proto/proto_reflect.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // The protoreflect build tag disables use of fast-path methods. 
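The equal.go changes above add an identical-pointer fast path and keep equalValue's documented NaN semantics: two NaN field values compare equal under proto.Equal even though NaN != NaN under Go's ==. A small sketch of the resulting behavior:

    package main

    import (
        "fmt"
        "math"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/known/wrapperspb"
    )

    func main() {
        x := wrapperspb.Double(math.NaN())
        y := wrapperspb.Double(math.NaN())

        fmt.Println(proto.Equal(x, x))            // true, via the new identical-pointer fast path
        fmt.Println(proto.Equal(x, y))            // true: both fields are NaN
        fmt.Println(x.GetValue() == y.GetValue()) // false: NaN != NaN in plain Go
    }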
+//go:build protoreflect // +build protoreflect package proto diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index cebb36cda..27d7e3501 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -155,9 +155,9 @@ func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, // // Suppose the scope was "fizz.buzz" and the reference was "Foo.Bar", // then the following full names are searched: -// * fizz.buzz.Foo.Bar -// * fizz.Foo.Bar -// * Foo.Bar +// - fizz.buzz.Foo.Bar +// - fizz.Foo.Bar +// - Foo.Bar func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.Descriptor, error) { if !ref.IsValid() { return nil, errors.New("invalid name reference: %q", ref) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go index 6be5d16e9..d5d5af6eb 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -53,6 +53,7 @@ type ( FindExtensionByName(field FullName) (ExtensionType, error) FindExtensionByNumber(message FullName, field FieldNumber) (ExtensionType, error) } + Depth int } unmarshalOutput = struct { pragma.NoUnkeyedLiterals diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go index dd85915bd..55aa14922 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -8,8 +8,7 @@ // defined in proto source files and value interfaces which provide the // ability to examine and manipulate the contents of messages. // -// -// Protocol Buffer Descriptors +// # Protocol Buffer Descriptors // // Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor) // are immutable objects that represent protobuf type information. @@ -26,8 +25,7 @@ // The "google.golang.org/protobuf/reflect/protodesc" package converts between // google.protobuf.DescriptorProto messages and protobuf descriptors. // -// -// Go Type Descriptors +// # Go Type Descriptors // // A type descriptor (e.g., EnumType or MessageType) is a constructor for // a concrete Go type that represents the associated protobuf descriptor. @@ -41,8 +39,7 @@ // The "google.golang.org/protobuf/types/dynamicpb" package can be used to // create Go type descriptors from protobuf descriptors. // -// -// Value Interfaces +// # Value Interfaces // // The Enum and Message interfaces provide a reflective view over an // enum or message instance. For enums, it provides the ability to retrieve @@ -55,13 +52,11 @@ // The "github.com/golang/protobuf/proto".MessageReflect function can be used // to obtain a reflective view on older messages. // -// -// Relationships +// # Relationships // // The following diagrams demonstrate the relationships between // various types declared in this package. // -// // ┌───────────────────────────────────┐ // V │ // ┌────────────── New(n) ─────────────┐ │ @@ -83,7 +78,6 @@ // // • An Enum is a concrete enum instance. Generated enums implement Enum. 
// -// // ┌──────────────── New() ─────────────────┐ // │ │ // │ ┌─── Descriptor() ─────┐ │ ┌── Interface() ───┐ @@ -98,12 +92,22 @@ // // • A MessageType describes a concrete Go message type. // It has a MessageDescriptor and can construct a Message instance. +// Just as how Go's reflect.Type is a reflective description of a Go type, +// a MessageType is a reflective description of a Go type for a protobuf message. // // • A MessageDescriptor describes an abstract protobuf message type. -// -// • A Message is a concrete message instance. Generated messages implement -// ProtoMessage, which can convert to/from a Message. -// +// It has no understanding of Go types. In order to construct a MessageType +// from just a MessageDescriptor, you can consider looking up the message type +// in the global registry using protoregistry.GlobalTypes.FindMessageByName +// or constructing a dynamic MessageType using dynamicpb.NewMessageType. +// +// • A Message is a reflective view over a concrete message instance. +// Generated messages implement ProtoMessage, which can convert to a Message. +// Just as how Go's reflect.Value is a reflective view over a Go value, +// a Message is a reflective view over a concrete protobuf message instance. +// Using Go reflection as an analogy, the ProtoReflect method is similar to +// calling reflect.ValueOf, and the Message.Interface method is similar to +// calling reflect.Value.Interface. // // ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐ // │ V │ V diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go index 121ba3a07..0b9942885 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go @@ -87,6 +87,7 @@ func (p1 SourcePath) Equal(p2 SourcePath) bool { // in a future version of this module. // // Example output: +// // .message_type[6].nested_type[15].field[3] func (p SourcePath) String() string { b := p.appendFileDescriptorProto(nil) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index 8e53c44a9..3867470d3 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -480,6 +480,7 @@ type ExtensionDescriptors interface { // relative to the parent that it is declared within. // // For example: +// // syntax = "proto2"; // package example; // message FooMessage { diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go index 918e685e1..7ced876f4 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build purego || appengine // +build purego appengine package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 5a3414724..ca8e28c5b 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -41,6 +41,32 @@ import ( // Converting to/from a Value and a concrete Go value panics on type mismatch. // For example, ValueOf("hello").Int() panics because this attempts to // retrieve an int64 from a string. +// +// List, Map, and Message Values are called "composite" values. +// +// A composite Value may alias (reference) memory at some location, +// such that changes to the Value updates the that location. +// A composite value acquired with a Mutable method, such as Message.Mutable, +// always references the source object. +// +// For example: +// +// // Append a 0 to a "repeated int32" field. +// // Since the Value returned by Mutable is guaranteed to alias +// // the source message, modifying the Value modifies the message. +// message.Mutable(fieldDesc).(List).Append(protoreflect.ValueOfInt32(0)) +// +// // Assign [0] to a "repeated int32" field by creating a new Value, +// // modifying it, and assigning it. +// list := message.NewField(fieldDesc).(List) +// list.Append(protoreflect.ValueOfInt32(0)) +// message.Set(fieldDesc, list) +// // ERROR: Since it is not defined whether Set aliases the source, +// // appending to the List here may or may not modify the message. +// list.Append(protoreflect.ValueOfInt32(0)) +// +// Some operations, such as Message.Get, may return an "empty, read-only" +// composite Value. Modifying an empty, read-only value panics. type Value value // The protoreflect API uses a custom Value union type instead of interface{} @@ -367,6 +393,7 @@ func (v Value) MapKey() MapKey { // ╚═════════╧═════════════════════════════════════╝ // // A MapKey is constructed and accessed through a Value: +// // k := ValueOf("hash").MapKey() // convert string to MapKey // s := k.String() // convert MapKey to string // diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go index c45debdca..702ddf22a 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 59f024c44..58352a697 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -30,9 +30,11 @@ import ( // conflictPolicy configures the policy for handling registration conflicts. 
// // It can be over-written at compile time with a linker-initialized variable: +// // go build -ldflags "-X google.golang.org/protobuf/reflect/protoregistry.conflictPolicy=warn" // // It can be over-written at program execution with an environment variable: +// // GOLANG_PROTOBUF_REGISTRATION_CONFLICT=warn ./main // // Neither of the above are covered by the compatibility promise and diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go index 32c04f67e..44cf467d8 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -103,6 +103,7 @@ type UnmarshalInput = struct { FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) } + Depth int } // UnmarshalOutput is output from the Unmarshal method. diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go index ff094e1ba..a105cb23e 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go +++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go @@ -26,16 +26,19 @@ const ( // EnforceVersion is used by code generated by protoc-gen-go // to statically enforce minimum and maximum versions of this package. // A compilation failure implies either that: -// * the runtime package is too old and needs to be updated OR -// * the generated code is too old and needs to be regenerated. +// - the runtime package is too old and needs to be updated OR +// - the generated code is too old and needs to be regenerated. // // The runtime package can be upgraded by running: +// // go get google.golang.org/protobuf // // The generated code can be regenerated by running: +// // protoc --go_out=${PROTOC_GEN_GO_ARGS} ${PROTO_FILES} // // Example usage by generated code: +// // const ( // // Verify that this generated code is sufficiently up-to-date. // _ = protoimpl.EnforceVersion(genVersion - protoimpl.MinVersion) @@ -49,6 +52,7 @@ const ( type EnforceVersion uint // This enforces the following invariant: +// // MinVersion ≤ GenVersion ≤ MaxVersion const ( _ = EnforceVersion(GenVersion - MinVersion) diff --git a/vendor/k8s.io/klog/v2/OWNERS b/vendor/k8s.io/klog/v2/OWNERS index 8cccebf2e..a2fe8f351 100644 --- a/vendor/k8s.io/klog/v2/OWNERS +++ b/vendor/k8s.io/klog/v2/OWNERS @@ -1,5 +1,6 @@ # See the OWNERS docs at https://go.k8s.io/owners reviewers: + - harshanarayana - pohly approvers: - dims diff --git a/vendor/k8s.io/klog/v2/README.md b/vendor/k8s.io/klog/v2/README.md index 7de2212cc..d45cbe172 100644 --- a/vendor/k8s.io/klog/v2/README.md +++ b/vendor/k8s.io/klog/v2/README.md @@ -28,7 +28,6 @@ Historical context is available here: Semantic versioning is used in this repository. It contains several Go modules with different levels of stability: - `k8s.io/klog/v2` - stable API, `vX.Y.Z` tags -- `k8s.io/hack/tools` - no stable API yet (may change eventually or get moved to separate repo), `hack/tools/v0.Y.Z` tags - `examples` - no stable API, no tags, no intention to ever stabilize Exempt from the API stability guarantee are items (packages, functions, etc.) 
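Both the registry.go comment above and the earlier protoreflect/proto.go doc changes point at protoregistry.GlobalTypes as the bridge from a full name or descriptor to a usable message type. A minimal lookup sketch (google.protobuf.Duration is simply a type known to be registered by importing its generated package):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/reflect/protoregistry"
        "google.golang.org/protobuf/types/dynamicpb"
        _ "google.golang.org/protobuf/types/known/durationpb" // init() registers the type
    )

    func main() {
        // Full name -> MessageType via the global registry.
        mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.Duration")
        if err != nil {
            panic(err) // protoregistry.NotFound if no generated package registered it
        }
        msg := mt.New() // a protoreflect.Message backed by the generated Go type

        // Descriptor -> MessageType without generated code, via dynamicpb.
        dyn := dynamicpb.NewMessageType(mt.Descriptor())
        fmt.Println(msg.Descriptor().FullName(), dyn.New().IsValid())
    }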
diff --git a/vendor/k8s.io/klog/v2/contextual.go b/vendor/k8s.io/klog/v2/contextual.go index 0bf19280e..2428963c0 100644 --- a/vendor/k8s.io/klog/v2/contextual.go +++ b/vendor/k8s.io/klog/v2/contextual.go @@ -34,18 +34,6 @@ import ( // mutex locking. var ( - // contextualLoggingEnabled controls whether contextual logging is - // active. Disabling it may have some small performance benefit. - contextualLoggingEnabled = true - - // globalLogger is the global Logger chosen by users of klog, nil if - // none is available. - globalLogger *Logger - - // globalLoggerOptions contains the options that were supplied for - // globalLogger. - globalLoggerOptions loggerOptions - // klogLogger is used as fallback for logging through the normal klog code // when no Logger is set. klogLogger logr.Logger = logr.New(&klogger{}) @@ -59,8 +47,9 @@ var ( // If set, all log lines will be suppressed from the regular output, and // redirected to the logr implementation. // Use as: -// ... -// klog.SetLogger(zapr.NewLogger(zapLog)) +// +// ... +// klog.SetLogger(zapr.NewLogger(zapLog)) // // To remove a backing logr implemention, use ClearLogger. Setting an // empty logger with SetLogger(logr.Logger{}) does not work. @@ -81,10 +70,10 @@ func SetLogger(logger logr.Logger) { // routing log entries through klogr into klog and then into the actual Logger // backend. func SetLoggerWithOptions(logger logr.Logger, opts ...LoggerOption) { - globalLogger = &logger - globalLoggerOptions = loggerOptions{} + logging.logger = &logger + logging.loggerOptions = loggerOptions{} for _, opt := range opts { - opt(&globalLoggerOptions) + opt(&logging.loggerOptions) } } @@ -119,8 +108,8 @@ type loggerOptions struct { // Modifying the logger is not thread-safe and should be done while no other // goroutines invoke log calls, usually during program initialization. func ClearLogger() { - globalLogger = nil - globalLoggerOptions = loggerOptions{} + logging.logger = nil + logging.loggerOptions = loggerOptions{} } // EnableContextualLogging controls whether contextual logging is enabled. @@ -132,14 +121,14 @@ func ClearLogger() { // // This must be called during initialization before goroutines are started. func EnableContextualLogging(enabled bool) { - contextualLoggingEnabled = enabled + logging.contextualLoggingEnabled = enabled } // FromContext retrieves a logger set by the caller or, if not set, // falls back to the program's global logger (a Logger instance or klog // itself). func FromContext(ctx context.Context) Logger { - if contextualLoggingEnabled { + if logging.contextualLoggingEnabled { if logger, err := logr.FromContext(ctx); err == nil { return logger } @@ -160,10 +149,10 @@ func TODO() Logger { // better receive a logger via its parameters. TODO can be used as a temporary // solution for such code. func Background() Logger { - if globalLoggerOptions.contextualLogger { - // Is non-nil because globalLoggerOptions.contextualLogger is + if logging.loggerOptions.contextualLogger { + // Is non-nil because logging.loggerOptions.contextualLogger is // only true if a logger was set. - return *globalLogger + return *logging.logger } return klogLogger @@ -172,7 +161,7 @@ func Background() Logger { // LoggerWithValues returns logger.WithValues(...kv) when // contextual logging is enabled, otherwise the logger. func LoggerWithValues(logger Logger, kv ...interface{}) Logger { - if contextualLoggingEnabled { + if logging.contextualLoggingEnabled { return logger.WithValues(kv...) 
} return logger @@ -181,7 +170,7 @@ func LoggerWithValues(logger Logger, kv ...interface{}) Logger { // LoggerWithName returns logger.WithName(name) when contextual logging is // enabled, otherwise the logger. func LoggerWithName(logger Logger, name string) Logger { - if contextualLoggingEnabled { + if logging.contextualLoggingEnabled { return logger.WithName(name) } return logger @@ -190,7 +179,7 @@ func LoggerWithName(logger Logger, name string) Logger { // NewContext returns logr.NewContext(ctx, logger) when // contextual logging is enabled, otherwise ctx. func NewContext(ctx context.Context, logger Logger) context.Context { - if contextualLoggingEnabled { + if logging.contextualLoggingEnabled { return logr.NewContext(ctx, logger) } return ctx diff --git a/vendor/k8s.io/klog/v2/internal/dbg/dbg.go b/vendor/k8s.io/klog/v2/internal/dbg/dbg.go new file mode 100644 index 000000000..f27bd1447 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/dbg/dbg.go @@ -0,0 +1,42 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package dbg provides some helper code for call traces. +package dbg + +import ( + "runtime" +) + +// Stacks is a wrapper for runtime.Stack that attempts to recover the data for +// all goroutines or the calling one. +func Stacks(all bool) []byte { + // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. + n := 10000 + if all { + n = 100000 + } + var trace []byte + for i := 0; i < 5; i++ { + trace = make([]byte, n) + nbytes := runtime.Stack(trace, all) + if nbytes < len(trace) { + return trace[:nbytes] + } + n *= 2 + } + return trace +} diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go index d89731368..ad6bf1116 100644 --- a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go @@ -20,6 +20,8 @@ import ( "bytes" "fmt" "strconv" + + "github.com/go-logr/logr" ) // WithValues implements LogSink.WithValues. The old key/value pairs are @@ -44,53 +46,49 @@ func WithValues(oldKV, newKV []interface{}) []interface{} { return kv } -// TrimDuplicates deduplicates elements provided in multiple key/value tuple -// slices, whilst maintaining the distinction between where the items are -// contained. 
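The contextual.go rework above only moves state: the global logger, its options, and the contextual-logging flag now live in the shared logging.settings struct, while the public API is unchanged. A minimal sketch of that API, using logr's funcr package as a stand-in backend:

    package main

    import (
        "context"
        "fmt"

        "github.com/go-logr/logr/funcr"
        "k8s.io/klog/v2"
    )

    func main() {
        // Route klog output through a logr.Logger; funcr just prints.
        logger := funcr.New(func(prefix, args string) {
            fmt.Println(prefix, args)
        }, funcr.Options{})
        klog.SetLogger(logger)
        defer klog.ClearLogger()

        // Contextual logging: attach a named logger to a context,
        // then retrieve it (or the global fallback) with FromContext.
        ctx := klog.NewContext(context.Background(), logger.WithName("worker"))
        klog.FromContext(ctx).Info("hello", "key", "value")
    }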
-func TrimDuplicates(kvLists ...[]interface{}) [][]interface{} { - // maintain a map of all seen keys - seenKeys := map[interface{}]struct{}{} - // build the same number of output slices as inputs - outs := make([][]interface{}, len(kvLists)) - // iterate over the input slices backwards, as 'later' kv specifications - // of the same key will take precedence over earlier ones - for i := len(kvLists) - 1; i >= 0; i-- { - // initialise this output slice - outs[i] = []interface{}{} - // obtain a reference to the kvList we are processing - // and make sure it has an even number of entries - kvList := kvLists[i] - if len(kvList)%2 != 0 { - kvList = append(kvList, missingValue) - } +// MergeKVs deduplicates elements provided in two key/value slices. +// +// Keys in each slice are expected to be unique, so duplicates can only occur +// when the first and second slice contain the same key. When that happens, the +// key/value pair from the second slice is used. The first slice must be well-formed +// (= even key/value pairs). The second one may have a missing value, in which +// case the special "missing value" is added to the result. +func MergeKVs(first, second []interface{}) []interface{} { + maxLength := len(first) + (len(second)+1)/2*2 + if maxLength == 0 { + // Nothing to do at all. + return nil + } - // start iterating at len(kvList) - 2 (i.e. the 2nd last item) for - // slices that have an even number of elements. - // We add (len(kvList) % 2) here to handle the case where there is an - // odd number of elements in a kvList. - // If there is an odd number, then the last element in the slice will - // have the value 'null'. - for i2 := len(kvList) - 2 + (len(kvList) % 2); i2 >= 0; i2 -= 2 { - k := kvList[i2] - // if we have already seen this key, do not include it again - if _, ok := seenKeys[k]; ok { - continue - } - // make a note that we've observed a new key - seenKeys[k] = struct{}{} - // attempt to obtain the value of the key - var v interface{} - // i2+1 should only ever be out of bounds if we handling the first - // iteration over a slice with an odd number of elements - if i2+1 < len(kvList) { - v = kvList[i2+1] - } - // add this KV tuple to the *start* of the output list to maintain - // the original order as we are iterating over the slice backwards - outs[i] = append([]interface{}{k, v}, outs[i]...) + if len(first) == 0 && len(second)%2 == 0 { + // Nothing to be overridden, second slice is well-formed + // and can be used directly. + return second + } + + // Determine which keys are in the second slice so that we can skip + // them when iterating over the first one. The code intentionally + // favors performance over completeness: we assume that keys are string + // constants and thus compare equal when the string values are equal. A + // string constant being overridden by, for example, a fmt.Stringer is + // not handled. + overrides := map[interface{}]bool{} + for i := 0; i < len(second); i += 2 { + overrides[second[i]] = true + } + merged := make([]interface{}, 0, maxLength) + for i := 0; i+1 < len(first); i += 2 { + key := first[i] + if overrides[key] { + continue } + merged = append(merged, key, first[i+1]) } - return outs + merged = append(merged, second...) 
+ if len(merged)%2 != 0 { + merged = append(merged, missingValue) + } + return merged } const missingValue = "(MISSING)" @@ -111,10 +109,10 @@ func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments // for the sake of performance. Keys with spaces, // special characters, etc. will break parsing. - if k, ok := k.(string); ok { + if sK, ok := k.(string); ok { // Avoid one allocation when the key is a string, which // normally it should be. - b.WriteString(k) + b.WriteString(sK) } else { b.WriteString(fmt.Sprintf("%s", k)) } @@ -131,6 +129,24 @@ func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { writeStringValue(b, true, v) case error: writeStringValue(b, true, ErrorToString(v)) + case logr.Marshaler: + value := MarshalerToValue(v) + // A marshaler that returns a string is useful for + // delayed formatting of complex values. We treat this + // case like a normal string. This is useful for + // multi-line support. + // + // We could do this by recursively formatting a value, + // but that comes with the risk of infinite recursion + // if a marshaler returns itself. Instead we call it + // only once and rely on it returning the intended + // value directly. + switch value := value.(type) { + case string: + writeStringValue(b, true, value) + default: + writeStringValue(b, false, fmt.Sprintf("%+v", value)) + } case []byte: // In https://github.com/kubernetes/klog/pull/237 it was decided // to format byte slices with "%+q". The advantages of that are: @@ -163,6 +179,18 @@ func StringerToString(s fmt.Stringer) (ret string) { return } +// MarshalerToValue invokes a marshaler and catches +// panics. +func MarshalerToValue(m logr.Marshaler) (ret interface{}) { + defer func() { + if err := recover(); err != nil { + ret = fmt.Sprintf("<panic: %s>", err) + } + }() + ret = m.MarshalLog() + return +} + // ErrorToString converts an error to a string, // handling panics if they occur. func ErrorToString(err error) (ret string) { diff --git a/vendor/k8s.io/klog/v2/k8s_references.go b/vendor/k8s.io/klog/v2/k8s_references.go index db58f8baa..2c218f698 100644 --- a/vendor/k8s.io/klog/v2/k8s_references.go +++ b/vendor/k8s.io/klog/v2/k8s_references.go @@ -77,6 +77,8 @@ func KRef(namespace, name string) ObjectRef { } // KObjs returns slice of ObjectRef from an slice of ObjectMeta +// +// DEPRECATED: Use KObjSlice instead, it has better performance. func KObjs(arg interface{}) []ObjectRef { s := reflect.ValueOf(arg) if s.Kind() != reflect.Slice { @@ -92,3 +94,65 @@ func KObjs(arg interface{}) []ObjectRef { } return objectRefs } + +// KObjSlice takes a slice of objects that implement the KMetadata interface +// and returns an object that gets logged as a slice of ObjectRef values or a +// string containing those values, depending on whether the logger prefers text +// output or structured output. +// +// An error string is logged when KObjSlice is not passed a suitable slice. +// +// Processing of the argument is delayed until the value actually gets logged, +// in contrast to KObjs where that overhead is incurred regardless of whether +// the result is needed.
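A usage sketch for the KObjSlice helper documented above (the pod type is a hypothetical stand-in for anything implementing klog's KMetadata interface, e.g. types embedding metav1.ObjectMeta):

    package main

    import "k8s.io/klog/v2"

    // pod is a hypothetical example type; KMetadata only requires
    // GetName and GetNamespace.
    type pod struct{ name, namespace string }

    func (p pod) GetName() string      { return p.name }
    func (p pod) GetNamespace() string { return p.namespace }

    func main() {
        pods := []pod{{"etcd-0", "kube-system"}, {"etcd-1", "kube-system"}}

        // Unlike KObjs, the slice is only processed when the entry is
        // actually emitted; structured backends receive ObjectRef values.
        klog.InfoS("syncing", "pods", klog.KObjSlice(pods))
    }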
+func KObjSlice(arg interface{}) interface{} { + return kobjSlice{arg: arg} +} + +type kobjSlice struct { + arg interface{} +} + +var _ fmt.Stringer = kobjSlice{} +var _ logr.Marshaler = kobjSlice{} + +func (ks kobjSlice) String() string { + objectRefs, err := ks.process() + if err != nil { + return err.Error() + } + return fmt.Sprintf("%v", objectRefs) +} + +func (ks kobjSlice) MarshalLog() interface{} { + objectRefs, err := ks.process() + if err != nil { + return err.Error() + } + return objectRefs +} + +func (ks kobjSlice) process() ([]interface{}, error) { + s := reflect.ValueOf(ks.arg) + switch s.Kind() { + case reflect.Invalid: + // nil parameter, print as nil. + return nil, nil + case reflect.Slice: + // Okay, handle below. + default: + return nil, fmt.Errorf("<KObjSlice needs a slice, got type %T>", ks.arg) + } + objectRefs := make([]interface{}, 0, s.Len()) + for i := 0; i < s.Len(); i++ { + item := s.Index(i).Interface() + if item == nil { + objectRefs = append(objectRefs, nil) + } else if v, ok := item.(KMetadata); ok { + objectRefs = append(objectRefs, KObj(v)) + } else { + return nil, fmt.Errorf("<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item) + } + } + return objectRefs, nil +} diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index cb04590fe..1bd11b675 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -39,35 +39,38 @@ // This package provides several flags that modify this behavior. // As a result, flag.Parse must be called before any logging is done. // -// -logtostderr=true -// Logs are written to standard error instead of to files. -// -alsologtostderr=false -// Logs are written to standard error as well as to files. -// -stderrthreshold=ERROR -// Log events at or above this severity are logged to standard -// error as well as to files. -// -log_dir="" -// Log files will be written to this directory instead of the -// default temporary directory. +// -logtostderr=true +// Logs are written to standard error instead of to files. +// This shortcuts most of the usual output routing: +// -alsologtostderr, -stderrthreshold and -log_dir have no +// effect and output redirection at runtime with SetOutput is +// ignored. +// -alsologtostderr=false +// Logs are written to standard error as well as to files. +// -stderrthreshold=ERROR +// Log events at or above this severity are logged to standard +// error as well as to files. +// -log_dir="" +// Log files will be written to this directory instead of the +// default temporary directory. // -// Other flags provide aids to debugging. -// -// -log_backtrace_at="" -// When set to a file and line number holding a logging statement, -// such as -// -log_backtrace_at=gopherflakes.go:234 -// a stack trace will be written to the Info log whenever execution -// hits that statement. (Unlike with -vmodule, the ".go" must be -// present.)
+// -v=0 +// Enable V-leveled logging at the specified level. +// -vmodule="" +// The syntax of the argument is a comma-separated list of pattern=N, +// where pattern is a literal file name (minus the ".go" suffix) or +// "glob" pattern and N is a V level. For instance, +// -vmodule=gopher*=3 +// sets the V level to 3 in all Go files whose names begin "gopher". package klog import ( @@ -92,6 +95,7 @@ import ( "k8s.io/klog/v2/internal/buffer" "k8s.io/klog/v2/internal/clock" + "k8s.io/klog/v2/internal/dbg" "k8s.io/klog/v2/internal/serialize" "k8s.io/klog/v2/internal/severity" ) @@ -242,6 +246,10 @@ func (m *moduleSpec) String() string { // Lock because the type is not atomic. TODO: clean this up. logging.mu.Lock() defer logging.mu.Unlock() + return m.serialize() +} + +func (m *moduleSpec) serialize() string { var b bytes.Buffer for i, f := range m.filter { if i > 0 { @@ -263,6 +271,17 @@ var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of // Set will sets module value // Syntax: -vmodule=recordio=2,file=1,gfs*=3 func (m *moduleSpec) Set(value string) error { + filter, err := parseModuleSpec(value) + if err != nil { + return err + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(logging.verbosity, filter, true) + return nil +} + +func parseModuleSpec(value string) ([]modulePat, error) { var filter []modulePat for _, pat := range strings.Split(value, ",") { if len(pat) == 0 { @@ -271,15 +290,15 @@ func (m *moduleSpec) Set(value string) error { } patLev := strings.Split(pat, "=") if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { - return errVmoduleSyntax + return nil, errVmoduleSyntax } pattern := patLev[0] v, err := strconv.ParseInt(patLev[1], 10, 32) if err != nil { - return errors.New("syntax error: expect comma-separated list of filename=N") + return nil, errors.New("syntax error: expect comma-separated list of filename=N") } if v < 0 { - return errors.New("negative value for vmodule level") + return nil, errors.New("negative value for vmodule level") } if v == 0 { continue // Ignore. It's harmless but no point in paying the overhead. @@ -287,10 +306,7 @@ func (m *moduleSpec) Set(value string) error { // TODO: check syntax of filter? filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(logging.verbosity, filter, true) - return nil + return filter, nil } // isLiteral reports whether the pattern is a literal string, that is, has no metacharacters @@ -380,45 +396,48 @@ type flushSyncWriter interface { io.Writer } -// init sets up the defaults. +var logging loggingT +var commandLine flag.FlagSet + +// init sets up the defaults and creates command line flags. func init() { + commandLine.StringVar(&logging.logDir, "log_dir", "", "If non-empty, write log files in this directory (no effect when -logtostderr=true)") + commandLine.StringVar(&logging.logFile, "log_file", "", "If non-empty, use this log file (no effect when -logtostderr=true)") + commandLine.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", 1800, + "Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. 
"+ + "If the value is 0, the maximum file size is unlimited.") + commandLine.BoolVar(&logging.toStderr, "logtostderr", true, "log to standard error instead of files") + commandLine.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files (no effect when -logtostderr=true)") + logging.setVState(0, nil, false) + commandLine.Var(&logging.verbosity, "v", "number for the log level verbosity") + commandLine.BoolVar(&logging.addDirHeader, "add_dir_header", false, "If true, adds the file directory to the header of the log messages") + commandLine.BoolVar(&logging.skipHeaders, "skip_headers", false, "If true, avoid header prefixes in the log messages") + commandLine.BoolVar(&logging.oneOutput, "one_output", false, "If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)") + commandLine.BoolVar(&logging.skipLogHeaders, "skip_log_headers", false, "If true, avoid headers when opening log files (no effect when -logtostderr=true)") logging.stderrThreshold = severityValue{ Severity: severity.ErrorLog, // Default stderrThreshold is ERROR. } - logging.setVState(0, nil, false) - logging.logDir = "" - logging.logFile = "" - logging.logFileMaxSizeMB = 1800 - logging.toStderr = true - logging.alsoToStderr = false - logging.skipHeaders = false - logging.addDirHeader = false - logging.skipLogHeaders = false - logging.oneOutput = false + commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=false)") + commandLine.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") + commandLine.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") + + logging.settings.contextualLoggingEnabled = true logging.flushD = newFlushDaemon(logging.lockAndFlushAll, nil) } // InitFlags is for explicitly initializing the flags. +// It may get called repeatedly for different flagsets, but not +// twice for the same one. May get called concurrently +// to other goroutines using klog. However, only some flags +// may get set concurrently (see implementation). func InitFlags(flagset *flag.FlagSet) { if flagset == nil { flagset = flag.CommandLine } - flagset.StringVar(&logging.logDir, "log_dir", logging.logDir, "If non-empty, write log files in this directory") - flagset.StringVar(&logging.logFile, "log_file", logging.logFile, "If non-empty, use this log file") - flagset.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", logging.logFileMaxSizeMB, - "Defines the maximum size a log file can grow to. Unit is megabytes. 
"+ - "If the value is 0, the maximum file size is unlimited.") - flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files") - flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files") - flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") - flagset.BoolVar(&logging.addDirHeader, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header of the log messages") - flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") - flagset.BoolVar(&logging.oneOutput, "one_output", logging.oneOutput, "If true, only write logs to their native severity level (vs also writing to each lower severity level)") - flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") - flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") - flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") - flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") + commandLine.VisitAll(func(f *flag.Flag) { + flagset.Var(f.Value, f.Name, f.Usage) + }) } // Flush flushes all pending log I/O. @@ -426,8 +445,20 @@ func Flush() { logging.lockAndFlushAll() } -// loggingT collects all the global state of the logging setup. -type loggingT struct { +// settings collects global settings. +type settings struct { + // contextualLoggingEnabled controls whether contextual logging is + // active. Disabling it may have some small performance benefit. + contextualLoggingEnabled bool + + // logger is the global Logger chosen by users of klog, nil if + // none is available. + logger *Logger + + // loggerOptions contains the options that were supplied for + // globalLogger. + loggerOptions loggerOptions + // Boolean flags. Not handled atomically because the flag.Value interface // does not let us avoid the =true, and that shorthand is necessary for // compatibility. TODO: does this matter enough to fix? Seems unlikely. @@ -437,26 +468,14 @@ type loggingT struct { // Level flag. Handled atomically. stderrThreshold severityValue // The -stderrthreshold flag. - // bufferCache maintains the free list. It uses its own mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - bufferCache buffer.Buffers + // Access to all of the following fields must be protected via a mutex. - // mu protects the remaining elements of this structure and is - // used to synchronize logging. - mu sync.Mutex // file holds writer for each of the log types. file [severity.NumSeverity]flushSyncWriter - // flushD holds a flushDaemon that frequently flushes log file buffers. - flushD *flushDaemon // flushInterval is the interval for periodic flushing. If zero, // the global default will be used. flushInterval time.Duration - // pcs is used in V to avoid an allocation when computing the caller's PC. - pcs [1]uintptr - // vmap is a cache of the V Level for each V() call site, identified by PC. - // It is wiped whenever the vmodule flag changes state. - vmap map[uintptr]Level + // filterLength stores the length of the vmodule filter chain. If greater // than zero, it means vmodule is enabled. 
+	// It may be read safely
+	// using sync.LoadInt32, but is only modified under mu.
@@ -496,7 +515,42 @@ type loggingT struct {
 	filter LogFilter
 }
 
-var logging loggingT
+// deepCopy creates a copy that doesn't share anything with the original
+// instance.
+func (s settings) deepCopy() settings {
+	// vmodule is a slice and would be shared, so we have to copy it.
+	filter := make([]modulePat, len(s.vmodule.filter))
+	for i := range s.vmodule.filter {
+		filter[i] = s.vmodule.filter[i]
+	}
+	s.vmodule.filter = filter
+
+	return s
+}
+
+// loggingT collects all the global state of the logging setup.
+type loggingT struct {
+	settings
+
+	// bufferCache maintains the free list. It uses its own mutex
+	// so buffers can be grabbed and printed to without holding the main lock,
+	// for better parallelization.
+	bufferCache buffer.Buffers
+
+	// flushD holds a flushDaemon that frequently flushes log file buffers.
+	// Uses its own mutex.
+	flushD *flushDaemon
+
+	// mu protects the remaining elements of this structure and the fields
+	// in settingsT which need a mutex lock.
+	mu sync.Mutex
+
+	// pcs is used in V to avoid an allocation when computing the caller's PC.
+	pcs [1]uintptr
+	// vmap is a cache of the V Level for each V() call site, identified by PC.
+	// It is wiped whenever the vmodule flag changes state.
+	vmap map[uintptr]Level
+}
 
 // setVState sets a consistent state for V logging.
 // l.mu is held.
@@ -520,14 +574,66 @@ func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool
 
 var timeNow = time.Now // Stubbed out for testing.
 
+// CaptureState gathers information about all current klog settings.
+// The result can be used to restore those settings.
+func CaptureState() State {
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	return &state{
+		settings:      logging.settings.deepCopy(),
+		flushDRunning: logging.flushD.isRunning(),
+		maxSize:       MaxSize,
+	}
+}
+
+// State stores a snapshot of klog settings. It gets created with CaptureState
+// and can be used to restore the entire state. Modifying individual settings
+// is supported via the command line flags.
+type State interface {
+	// Restore restores the entire state. It may get called more than once.
+	Restore()
+}
+
+type state struct {
+	settings
+
+	flushDRunning bool
+	maxSize       uint64
+}
+
+func (s *state) Restore() {
+	// This needs to be done before mutex locking.
+	if s.flushDRunning && !logging.flushD.isRunning() {
+		// This is not quite accurate: StartFlushDaemon might
+		// have been called with some different interval.
+		interval := s.flushInterval
+		if interval == 0 {
+			interval = flushInterval
+		}
+		logging.flushD.run(interval)
+	} else if !s.flushDRunning && logging.flushD.isRunning() {
+		logging.flushD.stop()
+	}
+
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+
+	logging.settings = s.settings
+	logging.setVState(s.verbosity, s.vmodule.filter, true)
+	MaxSize = s.maxSize
+}
+
 /*
 header formats a log header as defined by the C++ implementation.
 It returns a buffer containing the formatted header and the user's file and line number.
 The depth specifies how many stack frames above lives the source line to be identified in the log message.
 
 Log lines have this form:
+
 	Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
+ where the fields are defined as follows: + L A single character, representing the log level (eg 'I' for INFO) mm The month (zero padded; ie May is '05') dd The day (zero padded) @@ -688,7 +794,7 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, serialize.KVListFormat(&b.Buffer, "err", err) } serialize.KVListFormat(&b.Buffer, keysAndValues...) - l.printDepth(s, globalLogger, nil, depth+1, &b.Buffer) + l.printDepth(s, logging.logger, nil, depth+1, &b.Buffer) // Make the buffer available for reuse. l.bufferCache.PutBuffer(b) } @@ -757,7 +863,7 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf if l.traceLocation.isSet() { if l.traceLocation.match(file, line) { - buf.Write(stacks(false)) + buf.Write(dbg.Stacks(false)) } } data := buf.Bytes() @@ -765,7 +871,7 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf // TODO: set 'severity' and caller information as structured log info // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} if s == severity.ErrorLog { - globalLogger.WithCallDepth(depth+3).Error(nil, string(data)) + logging.logger.WithCallDepth(depth+3).Error(nil, string(data)) } else { log.WithCallDepth(depth + 3).Info(string(data)) } @@ -822,12 +928,15 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf OsExit(1) } // Dump all goroutine stacks before exiting. - trace := stacks(true) - // Write the stack trace for all goroutines to the stderr. - if l.toStderr || l.alsoToStderr || s >= l.stderrThreshold.get() || alsoToStderr { - os.Stderr.Write(trace) + // First, make sure we see the trace for the current goroutine on standard error. + // If -logtostderr has been specified, the loop below will do that anyway + // as the first stack in the full dump. + if !l.toStderr { + os.Stderr.Write(dbg.Stacks(false)) } + // Write the stack trace for all goroutines to the files. + trace := dbg.Stacks(true) logExitFunc = func(error) {} // If we get a write error, we'll still exit below. for log := severity.FatalLog; log >= severity.InfoLog; log-- { if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. @@ -847,25 +956,6 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf } } -// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. -func stacks(all bool) []byte { - // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. - n := 10000 - if all { - n = 100000 - } - var trace []byte - for i := 0; i < 5; i++ { - trace = make([]byte, n) - nbytes := runtime.Stack(trace, all) - if nbytes < len(trace) { - return trace[:nbytes] - } - n *= 2 - } - return trace -} - // logExitFunc provides a simple mechanism to override the default behavior // of exiting on error. Used in testing and to guarantee we reach a required exit // for fatal logs. Instead, exit could be a function rather than a method but that @@ -1077,9 +1167,9 @@ func (f *flushDaemon) isRunning() bool { return f.stopC != nil } -// StopFlushDaemon stops the flush daemon, if running. +// StopFlushDaemon stops the flush daemon, if running, and flushes once. // This prevents klog from leaking goroutines on shutdown. After stopping -// the daemon, you can still manually flush buffers by calling Flush(). +// the daemon, you can still manually flush buffers again by calling Flush(). 
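+//
+// A minimal usage sketch (illustrative only; the 5-second interval and the
+// deferred shutdown are assumptions, not part of this file):
+//
+//	klog.StartFlushDaemon(5 * time.Second)
+//	defer func() {
+//		klog.StopFlushDaemon()
+//		klog.Flush() // flush anything logged after the daemon stopped
+//	}()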
func StopFlushDaemon() { logging.flushD.stop() } @@ -1109,8 +1199,8 @@ func (l *loggingT) flushAll() { file.Sync() // ignore error } } - if globalLoggerOptions.flush != nil { - globalLoggerOptions.flush() + if logging.loggerOptions.flush != nil { + logging.loggerOptions.flush() } } @@ -1158,7 +1248,7 @@ func (lb logBridge) Write(b []byte) (n int, err error) { } // printWithFileLine with alsoToStderr=true, so standard log messages // always appear on standard error. - logging.printWithFileLine(severity.Severity(lb), globalLogger, logging.filter, file, line, true, text) + logging.printWithFileLine(severity.Severity(lb), logging.logger, logging.filter, file, line, true, text) return len(b), nil } @@ -1196,10 +1286,10 @@ type Verbose struct { } func newVerbose(level Level, b bool) Verbose { - if globalLogger == nil { + if logging.logger == nil { return Verbose{b, nil} } - v := globalLogger.V(int(level)) + v := logging.logger.V(int(level)) return Verbose{b, &v} } @@ -1207,9 +1297,13 @@ func newVerbose(level Level, b bool) Verbose { // The returned value is a struct of type Verbose, which implements Info, Infoln // and Infof. These methods will write to the Info log if called. // Thus, one may write either +// // if klog.V(2).Enabled() { klog.Info("log this") } +// // or +// // klog.V(2).Info("log this") +// // The second form is shorter but the first is cheaper if logging is off because it does // not evaluate its arguments. // @@ -1318,7 +1412,7 @@ func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) { // InfoSDepth acts as InfoS but uses depth to determine which call frame to log. // InfoSDepth(0, "msg") is the same as InfoS("msg"). func InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { - logging.infoS(globalLogger, logging.filter, depth, msg, keysAndValues...) + logging.infoS(logging.logger, logging.filter, depth, msg, keysAndValues...) } // InfoSDepth is equivalent to the global InfoSDepth function, guarded by the value of v. @@ -1347,37 +1441,37 @@ func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) { // Info logs to the INFO log. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Info(args ...interface{}) { - logging.print(severity.InfoLog, globalLogger, logging.filter, args...) + logging.print(severity.InfoLog, logging.logger, logging.filter, args...) } // InfoDepth acts as Info but uses depth to determine which call frame to log. // InfoDepth(0, "msg") is the same as Info("msg"). func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(severity.InfoLog, globalLogger, logging.filter, depth, args...) + logging.printDepth(severity.InfoLog, logging.logger, logging.filter, depth, args...) } // Infoln logs to the INFO log. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Infoln(args ...interface{}) { - logging.println(severity.InfoLog, globalLogger, logging.filter, args...) + logging.println(severity.InfoLog, logging.logger, logging.filter, args...) } // InfolnDepth acts as Infoln but uses depth to determine which call frame to log. // InfolnDepth(0, "msg") is the same as Infoln("msg"). func InfolnDepth(depth int, args ...interface{}) { - logging.printlnDepth(severity.InfoLog, globalLogger, logging.filter, depth, args...) + logging.printlnDepth(severity.InfoLog, logging.logger, logging.filter, depth, args...) } // Infof logs to the INFO log. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. 
func Infof(format string, args ...interface{}) { - logging.printf(severity.InfoLog, globalLogger, logging.filter, format, args...) + logging.printf(severity.InfoLog, logging.logger, logging.filter, format, args...) } // InfofDepth acts as Infof but uses depth to determine which call frame to log. // InfofDepth(0, "msg", args...) is the same as Infof("msg", args...). func InfofDepth(depth int, format string, args ...interface{}) { - logging.printfDepth(severity.InfoLog, globalLogger, logging.filter, depth, format, args...) + logging.printfDepth(severity.InfoLog, logging.logger, logging.filter, depth, format, args...) } // InfoS structured logs to the INFO log. @@ -1389,79 +1483,79 @@ func InfofDepth(depth int, format string, args ...interface{}) { // output: // >> I1025 00:15:15.525108 1 controller_utils.go:116] "Pod status updated" pod="kubedns" status="ready" func InfoS(msg string, keysAndValues ...interface{}) { - logging.infoS(globalLogger, logging.filter, 0, msg, keysAndValues...) + logging.infoS(logging.logger, logging.filter, 0, msg, keysAndValues...) } // Warning logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Warning(args ...interface{}) { - logging.print(severity.WarningLog, globalLogger, logging.filter, args...) + logging.print(severity.WarningLog, logging.logger, logging.filter, args...) } // WarningDepth acts as Warning but uses depth to determine which call frame to log. // WarningDepth(0, "msg") is the same as Warning("msg"). func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(severity.WarningLog, globalLogger, logging.filter, depth, args...) + logging.printDepth(severity.WarningLog, logging.logger, logging.filter, depth, args...) } // Warningln logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Warningln(args ...interface{}) { - logging.println(severity.WarningLog, globalLogger, logging.filter, args...) + logging.println(severity.WarningLog, logging.logger, logging.filter, args...) } // WarninglnDepth acts as Warningln but uses depth to determine which call frame to log. // WarninglnDepth(0, "msg") is the same as Warningln("msg"). func WarninglnDepth(depth int, args ...interface{}) { - logging.printlnDepth(severity.WarningLog, globalLogger, logging.filter, depth, args...) + logging.printlnDepth(severity.WarningLog, logging.logger, logging.filter, depth, args...) } // Warningf logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Warningf(format string, args ...interface{}) { - logging.printf(severity.WarningLog, globalLogger, logging.filter, format, args...) + logging.printf(severity.WarningLog, logging.logger, logging.filter, format, args...) } // WarningfDepth acts as Warningf but uses depth to determine which call frame to log. // WarningfDepth(0, "msg", args...) is the same as Warningf("msg", args...). func WarningfDepth(depth int, format string, args ...interface{}) { - logging.printfDepth(severity.WarningLog, globalLogger, logging.filter, depth, format, args...) + logging.printfDepth(severity.WarningLog, logging.logger, logging.filter, depth, format, args...) } // Error logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Error(args ...interface{}) { - logging.print(severity.ErrorLog, globalLogger, logging.filter, args...) 
+ logging.print(severity.ErrorLog, logging.logger, logging.filter, args...) } // ErrorDepth acts as Error but uses depth to determine which call frame to log. // ErrorDepth(0, "msg") is the same as Error("msg"). func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(severity.ErrorLog, globalLogger, logging.filter, depth, args...) + logging.printDepth(severity.ErrorLog, logging.logger, logging.filter, depth, args...) } // Errorln logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Errorln(args ...interface{}) { - logging.println(severity.ErrorLog, globalLogger, logging.filter, args...) + logging.println(severity.ErrorLog, logging.logger, logging.filter, args...) } // ErrorlnDepth acts as Errorln but uses depth to determine which call frame to log. // ErrorlnDepth(0, "msg") is the same as Errorln("msg"). func ErrorlnDepth(depth int, args ...interface{}) { - logging.printlnDepth(severity.ErrorLog, globalLogger, logging.filter, depth, args...) + logging.printlnDepth(severity.ErrorLog, logging.logger, logging.filter, depth, args...) } // Errorf logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Errorf(format string, args ...interface{}) { - logging.printf(severity.ErrorLog, globalLogger, logging.filter, format, args...) + logging.printf(severity.ErrorLog, logging.logger, logging.filter, format, args...) } // ErrorfDepth acts as Errorf but uses depth to determine which call frame to log. // ErrorfDepth(0, "msg", args...) is the same as Errorf("msg", args...). func ErrorfDepth(depth int, format string, args ...interface{}) { - logging.printfDepth(severity.ErrorLog, globalLogger, logging.filter, depth, format, args...) + logging.printfDepth(severity.ErrorLog, logging.logger, logging.filter, depth, format, args...) } // ErrorS structured logs to the ERROR, WARNING, and INFO logs. @@ -1474,52 +1568,63 @@ func ErrorfDepth(depth int, format string, args ...interface{}) { // output: // >> E1025 00:15:15.525108 1 controller_utils.go:114] "Failed to update pod status" err="timeout" func ErrorS(err error, msg string, keysAndValues ...interface{}) { - logging.errorS(err, globalLogger, logging.filter, 0, msg, keysAndValues...) + logging.errorS(err, logging.logger, logging.filter, 0, msg, keysAndValues...) } // ErrorSDepth acts as ErrorS but uses depth to determine which call frame to log. // ErrorSDepth(0, "msg") is the same as ErrorS("msg"). func ErrorSDepth(depth int, err error, msg string, keysAndValues ...interface{}) { - logging.errorS(err, globalLogger, logging.filter, depth, msg, keysAndValues...) + logging.errorS(err, logging.logger, logging.filter, depth, msg, keysAndValues...) } // Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls OsExit(255). +// prints stack trace(s), then calls OsExit(255). +// +// Stderr only receives a dump of the current goroutine's stack trace. Log files, +// if there are any, receive a dump of the stack traces in all goroutines. 
+// +// Callers who want more control over handling of fatal events may instead use a +// combination of different functions: +// - some info or error logging function, optionally with a stack trace +// value generated by github.com/go-logr/lib/dbg.Backtrace +// - Flush to flush pending log data +// - panic, os.Exit or returning to the caller with an error +// // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Fatal(args ...interface{}) { - logging.print(severity.FatalLog, globalLogger, logging.filter, args...) + logging.print(severity.FatalLog, logging.logger, logging.filter, args...) } // FatalDepth acts as Fatal but uses depth to determine which call frame to log. // FatalDepth(0, "msg") is the same as Fatal("msg"). func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) + logging.printDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) } // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls OsExit(255). // Arguments are handled in the manner of fmt.Println; a newline is always appended. func Fatalln(args ...interface{}) { - logging.println(severity.FatalLog, globalLogger, logging.filter, args...) + logging.println(severity.FatalLog, logging.logger, logging.filter, args...) } // FatallnDepth acts as Fatalln but uses depth to determine which call frame to log. // FatallnDepth(0, "msg") is the same as Fatalln("msg"). func FatallnDepth(depth int, args ...interface{}) { - logging.printlnDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) + logging.printlnDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) } // Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls OsExit(255). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Fatalf(format string, args ...interface{}) { - logging.printf(severity.FatalLog, globalLogger, logging.filter, format, args...) + logging.printf(severity.FatalLog, logging.logger, logging.filter, format, args...) } // FatalfDepth acts as Fatalf but uses depth to determine which call frame to log. // FatalfDepth(0, "msg", args...) is the same as Fatalf("msg", args...). func FatalfDepth(depth int, format string, args ...interface{}) { - logging.printfDepth(severity.FatalLog, globalLogger, logging.filter, depth, format, args...) + logging.printfDepth(severity.FatalLog, logging.logger, logging.filter, depth, format, args...) } // fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. @@ -1530,41 +1635,41 @@ var fatalNoStacks uint32 // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. func Exit(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(severity.FatalLog, globalLogger, logging.filter, args...) + logging.print(severity.FatalLog, logging.logger, logging.filter, args...) } // ExitDepth acts as Exit but uses depth to determine which call frame to log. // ExitDepth(0, "msg") is the same as Exit("msg"). func ExitDepth(depth int, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) + logging.printDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) 
} // Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). func Exitln(args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(severity.FatalLog, globalLogger, logging.filter, args...) + logging.println(severity.FatalLog, logging.logger, logging.filter, args...) } // ExitlnDepth acts as Exitln but uses depth to determine which call frame to log. // ExitlnDepth(0, "msg") is the same as Exitln("msg"). func ExitlnDepth(depth int, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printlnDepth(severity.FatalLog, globalLogger, logging.filter, depth, args...) + logging.printlnDepth(severity.FatalLog, logging.logger, logging.filter, depth, args...) } // Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls OsExit(1). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. func Exitf(format string, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(severity.FatalLog, globalLogger, logging.filter, format, args...) + logging.printf(severity.FatalLog, logging.logger, logging.filter, format, args...) } // ExitfDepth acts as Exitf but uses depth to determine which call frame to log. // ExitfDepth(0, "msg", args...) is the same as Exitf("msg", args...). func ExitfDepth(depth int, format string, args ...interface{}) { atomic.StoreUint32(&fatalNoStacks, 1) - logging.printfDepth(severity.FatalLog, globalLogger, logging.filter, depth, format, args...) + logging.printfDepth(severity.FatalLog, logging.logger, logging.filter, depth, format, args...) } // LogFilter is a collection of functions that can filter all logging calls, diff --git a/vendor/k8s.io/klog/v2/klogr.go b/vendor/k8s.io/klog/v2/klogr.go index 351d7a740..027a4014a 100644 --- a/vendor/k8s.io/klog/v2/klogr.go +++ b/vendor/k8s.io/klog/v2/klogr.go @@ -43,11 +43,11 @@ func (l *klogger) Init(info logr.RuntimeInfo) { } func (l klogger) Info(level int, msg string, kvList ...interface{}) { - trimmed := serialize.TrimDuplicates(l.values, kvList) + merged := serialize.MergeKVs(l.values, kvList) if l.prefix != "" { msg = l.prefix + ": " + msg } - V(Level(level)).InfoSDepth(l.callDepth+1, msg, append(trimmed[0], trimmed[1]...)...) + V(Level(level)).InfoSDepth(l.callDepth+1, msg, merged...) } func (l klogger) Enabled(level int) bool { @@ -55,11 +55,11 @@ func (l klogger) Enabled(level int) bool { } func (l klogger) Error(err error, msg string, kvList ...interface{}) { - trimmed := serialize.TrimDuplicates(l.values, kvList) + merged := serialize.MergeKVs(l.values, kvList) if l.prefix != "" { msg = l.prefix + ": " + msg } - ErrorSDepth(l.callDepth+1, err, msg, append(trimmed[0], trimmed[1]...)...) + ErrorSDepth(l.callDepth+1, err, msg, merged...) } // WithName returns a new logr.Logger with the specified name appended. klogr diff --git a/vendor/k8s.io/kube-openapi/pkg/common/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go index c84ca7031..1a6c12e17 100644 --- a/vendor/k8s.io/kube-openapi/pkg/common/common.go +++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go @@ -20,7 +20,7 @@ import ( "net/http" "strings" - "github.com/emicklei/go-restful" + "github.com/emicklei/go-restful/v3" "k8s.io/kube-openapi/pkg/openapiconv" "k8s.io/kube-openapi/pkg/spec3" @@ -246,38 +246,42 @@ var schemaTypeFormatMap = map[string]typeInfo{ // the spec does not need to be simple type,format) or can even return a simple type,format (e.g. IntOrString). 
// For simple
// type formats, the benefit of adding OpenAPIDefinitionGetter interface is to keep both type and property documentation.
// Example:
-// type Sample struct {
-// ...
-// // port of the server
-// port IntOrString
-// ...
-// }
+//
+//	type Sample struct {
+//		...
+//		// port of the server
+//		port IntOrString
+//		...
+//	}
+//
 // // IntOrString documentation...
 // type IntOrString { ... }
 //
 // Adding IntOrString to this function:
-// "port" : {
-// format: "string",
-// type: "int-or-string",
-// Description: "port of the server"
-// }
+//
+//	"port" : {
+//		format: "string",
+//		type: "int-or-string",
+//		Description: "port of the server"
+//	}
 //
 // Implement OpenAPIDefinitionGetter for IntOrString:
 //
-// "port" : {
-// $Ref: "#/definitions/IntOrString"
-// Description: "port of the server"
-// }
+//	"port" : {
+//		$Ref: "#/definitions/IntOrString"
+//		Description: "port of the server"
+//	}
+//
 // ...
 // definitions:
-// {
-// "IntOrString": {
-// format: "string",
-// type: "int-or-string",
-// Description: "IntOrString documentation..." // new
-// }
-// }
 //
+//	{
+//		"IntOrString": {
+//			format: "string",
+//			type: "int-or-string",
+//			Description: "IntOrString documentation..." // new
+//		}
+//	}
 func OpenAPITypeFormat(typeName string) (string, string) {
 	mapped, ok := schemaTypeFormatMap[typeName]
 	if !ok {
diff --git a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go
index ec4adcdec..2c730f1bf 100644
--- a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go
+++ b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go
@@ -21,7 +21,6 @@ import (
 	"crypto/sha512"
 	"encoding/json"
 	"fmt"
-	"mime"
 	"net/http"
 	"net/url"
 	"path"
@@ -41,15 +40,9 @@ import (
 )
 
 const (
-	jsonExt = ".json"
-
-	mimeJson = "application/json"
-	// TODO(mehdy): change @68f4ded to a version tag when gnostic add version tags.
-	mimePb   = "application/com.github.googleapis.gnostic.OpenAPIv3@68f4ded+protobuf"
-	mimePbGz = "application/x-gzip"
-
-	subTypeProtobuf = "com.github.proto-openapi.spec.v3@v1.0+protobuf"
-	subTypeJSON     = "json"
+	subTypeProtobufDeprecated = "com.github.proto-openapi.spec.v3@v1.0+protobuf"
+	subTypeProtobuf           = "com.github.proto-openapi.spec.v3.v1.0+protobuf"
+	subTypeJSON               = "json"
 )
 
 // OpenAPIV3Discovery is the format of the Discovery document for OpenAPI V3
@@ -84,12 +77,6 @@ type OpenAPIV3Group struct {
 	etagCache handler.HandlerCache
 }
 
-func init() {
-	mime.AddExtensionType(".json", mimeJson)
-	mime.AddExtensionType(".pb-v1", mimePb)
-	mime.AddExtensionType(".gz", mimePbGz)
-}
-
 func computeETag(data []byte) string {
 	if data == nil {
 		return ""
@@ -154,7 +141,7 @@ func (o *OpenAPIService) getSingleGroupBytes(getType string, group string) ([]by
 		}
 		etagBytes, err := v.etagCache.Get()
 		return specBytes, string(etagBytes), v.lastModified, err
-	} else if getType == subTypeProtobuf {
+	} else if getType == subTypeProtobuf || getType == subTypeProtobufDeprecated {
 		specPb, err := v.pbCache.Get()
 		if err != nil {
 			return nil, "", v.lastModified, err
@@ -191,6 +178,8 @@ func ToV3ProtoBinary(json []byte) ([]byte, error) {
 
 func (o *OpenAPIService) HandleDiscovery(w http.ResponseWriter, r *http.Request) {
 	data, _ := o.getGroupBytes()
+	w.Header().Set("Etag", strconv.Quote(computeETag(data)))
+	w.Header().Set("Content-Type", "application/json")
 	http.ServeContent(w, r, "/openapi/v3", time.Now(), bytes.NewReader(data))
 }
 
@@ -210,11 +199,13 @@ func (o *OpenAPIService) HandleGroupVersion(w http.ResponseWriter, r *http.Reque
 	}
 
 	accepted := []struct {
-		Type    string
-		SubType string
+		Type                string
+		SubType             string
+		ReturnedContentType string
 	}{
-		{"application", subTypeJSON},
-		{"application", subTypeProtobuf},
+		{"application", subTypeJSON, "application/" + subTypeJSON},
+		{"application", subTypeProtobuf, "application/" + subTypeProtobuf},
+		{"application", subTypeProtobufDeprecated, "application/" + subTypeProtobuf},
 	}
 
 	for _, clause := range clauses {
@@ -229,6 +220,9 @@ func (o *OpenAPIService) HandleGroupVersion(w http.ResponseWriter, r *http.Reque
 			if err != nil {
 				return
 			}
+			// Set Content-Type header in the response
+			w.Header().Set("Content-Type", accepts.ReturnedContentType)
+
 			// ETag must be enclosed in double quotes: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag
 			w.Header().Set("Etag", strconv.Quote(etag))
diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/flags.go b/vendor/k8s.io/kube-openapi/pkg/internal/flags.go
new file mode 100644
index 000000000..bef603782
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/internal/flags.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package internal + +// Used by tests to selectively disable experimental JSON unmarshaler +var UseOptimizedJSONUnmarshaling bool = true +var UseOptimizedJSONUnmarshalingV3 bool = true + +// Used by tests to selectively disable experimental JSON marshaler +var UseOptimizedJSONMarshaling bool = true diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/serialization.go b/vendor/k8s.io/kube-openapi/pkg/internal/serialization.go new file mode 100644 index 000000000..7393bacf7 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/serialization.go @@ -0,0 +1,65 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "github.com/go-openapi/jsonreference" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" +) + +// DeterministicMarshal calls the jsonv2 library with the deterministic +// flag in order to have stable marshaling. +func DeterministicMarshal(in any) ([]byte, error) { + return jsonv2.MarshalOptions{Deterministic: true}.Marshal(jsonv2.EncodeOptions{}, in) +} + +// JSONRefFromMap populates a json reference object if the map v contains a $ref key. +func JSONRefFromMap(jsonRef *jsonreference.Ref, v map[string]interface{}) error { + if v == nil { + return nil + } + if vv, ok := v["$ref"]; ok { + if str, ok := vv.(string); ok { + ref, err := jsonreference.New(str) + if err != nil { + return err + } + *jsonRef = ref + } + } + return nil +} + +// SanitizeExtensions sanitizes the input map such that non extension +// keys (non x-*, X-*) keys are dropped from the map. Returns the new +// modified map, or nil if the map is now empty. 
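+//
+// A hedged illustration (the input map below is hypothetical):
+//
+//	e := map[string]interface{}{"x-order": 1, "X-Vendor": "a", "type": "object"}
+//	e = SanitizeExtensions(e) // "type" is dropped; the x-*/X-* keys remain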
+func SanitizeExtensions(e map[string]interface{}) map[string]interface{} { + for k := range e { + if !IsExtensionKey(k) { + delete(e, k) + } + } + if len(e) == 0 { + e = nil + } + return e +} + +// IsExtensionKey returns true if the input string is of format x-* or X-* +func IsExtensionKey(k string) bool { + return len(k) > 1 && (k[0] == 'x' || k[0] == 'X') && k[1] == '-' +} diff --git a/vendor/google.golang.org/protobuf/AUTHORS b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/AUTHORS similarity index 100% rename from vendor/google.golang.org/protobuf/AUTHORS rename to vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/AUTHORS diff --git a/vendor/google.golang.org/protobuf/CONTRIBUTORS b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/CONTRIBUTORS similarity index 100% rename from vendor/google.golang.org/protobuf/CONTRIBUTORS rename to vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/CONTRIBUTORS diff --git a/vendor/github.com/PuerkitoBio/urlesc/LICENSE b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/LICENSE similarity index 96% rename from vendor/github.com/PuerkitoBio/urlesc/LICENSE rename to vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/LICENSE index 744875676..244127300 100644 --- a/vendor/github.com/PuerkitoBio/urlesc/LICENSE +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2020 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/README.md b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/README.md new file mode 100644 index 000000000..0349adf69 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/README.md @@ -0,0 +1,321 @@ +# JSON Serialization (v2) + +[![GoDev](https://img.shields.io/static/v1?label=godev&message=reference&color=00add8)](https://pkg.go.dev/github.com/go-json-experiment/json) +[![Build Status](https://github.com/go-json-experiment/json/actions/workflows/test.yml/badge.svg?branch=master)](https://github.com/go-json-experiment/json/actions) + +This module hosts an experimental implementation of v2 `encoding/json`. +The API is unstable and breaking changes will regularly be made. +Do not depend on this in publicly available modules. + +## Goals and objectives + +* **Mostly backwards compatible:** If possible, v2 should aim to be _mostly_ +compatible with v1 in terms of both API and default behavior to ease migration. +For example, the `Marshal` and `Unmarshal` functions are the most widely used +declarations in the v1 package. It seems sensible for equivalent functionality +in v2 to be named the same and have the same signature. +Behaviorally, we should aim for 95% to 99% backwards compatibility. +We do not aim for 100% compatibility since we want the freedom to break +certain behaviors that are now considered to have been a mistake. +We may provide options that can bring the v2 implementation to 100% compatibility, +but it will not be the default. 
+ +* **More flexible:** There is a +[long list of feature requests](https://github.com/golang/go/issues?q=is%3Aissue+is%3Aopen+encoding%2Fjson+in%3Atitle). +We should aim to provide the most flexible features that addresses most usages. +We do not want to over fit the v2 API to handle every possible use case. +Ideally, the features provided should be orthogonal in nature such that +any combination of features results in as few surprising edge cases as possible. + +* **More performant:** JSON serialization is widely used and any bit of extra +performance gains will be greatly appreciated. Some rarely used behaviors of v1 +may be dropped in favor of better performance. For example, +despite `Encoder` and `Decoder` operating on an `io.Writer` and `io.Reader`, +they do not operate in a truly streaming manner, +leading to a loss in performance. The v2 implementation should aim to be truly +streaming by default (see [#33714](https://golang.org/issue/33714)). + +* **Easy to use (hard to misuse):** The v2 API should aim to make +the common case easy and the less common case at least possible. +The API should avoid behavior that goes contrary to user expectation, +which may result in subtle bugs (see [#36225](https://golang.org/issue/36225)). + +* **v1 and v2 maintainability:** Since the v1 implementation must stay forever, +it would be beneficial if v1 could be implemented under the hood with v2, +allowing for less maintenance burden in the future. This probably implies that +behavioral changes in v2 relative to v1 need to be exposed as options. + +* **Avoid unsafe:** Standard library packages generally avoid the use of +package `unsafe` even if it could provide a performance boost. +We aim to preserve this property. + +## Expectations + +While this module aims to possibly be the v2 implementation of `encoding/json`, +there is no guarantee that this outcome will occur. As with any major change +to the Go standard library, this will eventually go through the +[Go proposal process](https://github.com/golang/proposal#readme). +At the present moment, this is still in the design and experimentation phase +and is not ready for a formal proposal. + +There are several possible outcomes from this experiment: +1. We determine that a v2 `encoding/json` would not provide sufficient benefit +over the existing v1 `encoding/json` package. Thus, we abandon this effort. +2. We propose a v2 `encoding/json` design, but it is rejected in favor of some +other design that is considered superior. +3. We propose a v2 `encoding/json` design, but rather than adding an entirely +new v2 `encoding/json` package, we decide to merge its functionality into +the existing v1 `encoding/json` package. +4. We propose a v2 `encoding/json` design and it is accepted, resulting in +its addition to the standard library. +5. Some other unforeseen outcome (among the infinite number of possibilities). + +## Development + +This module is primarily developed by +[@dsnet](https://github.com/dsnet), +[@mvdan](https://github.com/mvdan), and +[@johanbrandhorst](https://github.com/johanbrandhorst) +with feedback provided by +[@rogpeppe](https://github.com/rogpeppe), +[@ChrisHines](https://github.com/ChrisHines), and +[@rsc](https://github.com/rsc). + +Discussion about semantics occur semi-regularly, where a +[record of past meetings can be found here](https://docs.google.com/document/d/1rovrOTd-wTawGMPPlPuKhwXaYBg9VszTXR9AQQL5LfI/edit?usp=sharing). 
+ +## Design overview + +This package aims to provide a clean separation between syntax and semantics. +Syntax deals with the structural representation of JSON (as specified in +[RFC 4627](https://tools.ietf.org/html/rfc4627), +[RFC 7159](https://tools.ietf.org/html/rfc7159), +[RFC 7493](https://tools.ietf.org/html/rfc7493), +[RFC 8259](https://tools.ietf.org/html/rfc8259), and +[RFC 8785](https://tools.ietf.org/html/rfc8785)). +Semantics deals with the meaning of syntactic data as usable application data. + +The `Encoder` and `Decoder` types are streaming tokenizers concerned with the +packing or parsing of JSON data. They operate on `Token` and `RawValue` types +which represent the common data structures that are representable in JSON. +`Encoder` and `Decoder` do not aim to provide any interpretation of the data. + +Functions like `Marshal`, `MarshalFull`, `MarshalNext`, `Unmarshal`, +`UnmarshalFull`, and `UnmarshalNext` provide semantic meaning by correlating +any arbitrary Go type with some JSON representation of that type (as stored in +data types like `[]byte`, `io.Writer`, `io.Reader`, `Encoder`, or `Decoder`). + +![API overview](api.png) + +This diagram provides a high-level overview of the v2 `json` package. +Purple blocks represent types, while blue blocks represent functions or methods. +The arrows and their direction represent the approximate flow of data. +The bottom half of the diagram contains functionality that is only concerned +with syntax, while the upper half contains functionality that assigns +semantic meaning to syntactic data handled by the bottom half. + +In contrast to v1 `encoding/json`, options are represented as separate types +rather than being setter methods on the `Encoder` or `Decoder` types. + +## Behavior changes + +The v2 `json` package changes the default behavior of `Marshal` and `Unmarshal` +relative to the v1 `json` package to be more sensible. +Some of these behavior changes have options and workarounds to opt into +behavior similar to what v1 provided. + +This table shows an overview of the changes: + +| v1 | v2 | Details | +| -- | -- | ------- | +| JSON object members are unmarshaled into a Go struct using a **case-insensitive name match**. | JSON object members are unmarshaled into a Go struct using a **case-sensitive name match**. | [CaseSensitivity](/diff_test.go#:~:text=TestCaseSensitivity) | +| When marshaling a Go struct, a struct field marked as `omitempty` is omitted if **the field value is an empty Go value**, which is defined as false, 0, a nil pointer, a nil interface value, and any empty array, slice, map, or string. | When marshaling a Go struct, a struct field marked as `omitempty` is omitted if **the field value would encode as an empty JSON value**, which is defined as a JSON null, or an empty JSON string, object, or array. | [OmitEmptyOption](/diff_test.go#:~:text=TestOmitEmptyOption) | +| The `string` option **does affect** Go bools. | The `string` option **does not affect** Go bools. | [StringOption](/diff_test.go#:~:text=TestStringOption) | +| The `string` option **does not recursively affect** sub-values of the Go field value. | The `string` option **does recursively affect** sub-values of the Go field value. | [StringOption](/diff_test.go#:~:text=TestStringOption) | +| The `string` option **sometimes accepts** a JSON null escaped within a JSON string. | The `string` option **never accepts** a JSON null escaped within a JSON string. 
| [StringOption](/diff_test.go#:~:text=TestStringOption) | +| A nil Go slice is marshaled as a **JSON null**. | A nil Go slice is marshaled as an **empty JSON array**. | [NilSlicesAndMaps](/diff_test.go#:~:text=TestNilSlicesAndMaps) | +| A nil Go map is marshaled as a **JSON null**. | A nil Go map is marshaled as an **empty JSON object**. | [NilSlicesAndMaps](/diff_test.go#:~:text=TestNilSlicesAndMaps) | +| A Go array may be unmarshaled from a **JSON array of any length**. | A Go array must be unmarshaled from a **JSON array of the same length**. | [Arrays](/diff_test.go#:~:text=Arrays) | +| A Go byte array is represented as a **JSON array of JSON numbers**. | A Go byte array is represented as a **Base64-encoded JSON string**. | [ByteArrays](/diff_test.go#:~:text=TestByteArrays) | +| `MarshalJSON` and `UnmarshalJSON` methods declared on a pointer receiver are **inconsistently called**. | `MarshalJSON` and `UnmarshalJSON` methods declared on a pointer receiver are **consistently called**. | [PointerReceiver](/diff_test.go#:~:text=TestPointerReceiver) | +| A Go map is marshaled in a **deterministic order**. | A Go map is marshaled in a **non-deterministic order**. | [MapDeterminism](/diff_test.go#:~:text=TestMapDeterminism) | +| JSON strings are encoded **with HTML-specific characters being escaped**. | JSON strings are encoded **without any characters being escaped** (unless necessary). | [EscapeHTML](/diff_test.go#:~:text=TestEscapeHTML) | +| When marshaling, invalid UTF-8 within a Go string **are silently replaced**. | When marshaling, invalid UTF-8 within a Go string **results in an error**. | [InvalidUTF8](/diff_test.go#:~:text=TestInvalidUTF8) | +| When unmarshaling, invalid UTF-8 within a JSON string **are silently replaced**. | When unmarshaling, invalid UTF-8 within a JSON string **results in an error**. | [InvalidUTF8](/diff_test.go#:~:text=TestInvalidUTF8) | +| When marshaling, **an error does not occur** if the output JSON value contains objects with duplicate names. | When marshaling, **an error does occur** if the output JSON value contains objects with duplicate names. | [DuplicateNames](/diff_test.go#:~:text=TestDuplicateNames) | +| When unmarshaling, **an error does not occur** if the input JSON value contains objects with duplicate names. | When unmarshaling, **an error does occur** if the input JSON value contains objects with duplicate names. | [DuplicateNames](/diff_test.go#:~:text=TestDuplicateNames) | +| Unmarshaling a JSON null into a non-empty Go value **inconsistently clears the value or does nothing**. | Unmarshaling a JSON null into a non-empty Go value **always clears the value**. | [MergeNull](/diff_test.go#:~:text=TestMergeNull) | +| Unmarshaling a JSON value into a non-empty Go value **follows inconsistent and bizarre behavior**. | Unmarshaling a JSON value into a non-empty Go value **always merges if the input is an object, and otherwise replaces**. | [MergeComposite](/diff_test.go#:~:text=TestMergeComposite) | +| A `time.Duration` is represented as a **JSON number containing the decimal number of nanoseconds**. | A `time.Duration` is represented as a **JSON string containing the formatted duration (e.g., "1h2m3.456s")**. | [TimeDurations](/diff_test.go#:~:text=TestTimeDurations) | +| Unmarshaling a JSON number into a Go float beyond its representation **results in an error**. | Unmarshaling a JSON number into a Go float beyond its representation **uses the closest representable value (e.g., ±`math.MaxFloat`)**. 
| [MaxFloats](/diff_test.go#:~:text=TestMaxFloats) | +| A Go struct with only unexported fields **can be serialized**. | A Go struct with only unexported fields **cannot be serialized**. | [EmptyStructs](/diff_test.go#:~:text=TestEmptyStructs) | +| A Go struct that embeds an unexported struct type **can sometimes be serialized**. | A Go struct that embeds an unexported struct type **cannot be serialized**. | [EmbedUnexported](/diff_test.go#:~:text=TestEmbedUnexported) | + +See [diff_test.go](/diff_test.go) for details about every change. + +## Performance + +One of the goals of the v2 module is to be more performant than v1. + +Each of the charts below show the performance across +several different JSON implementations: + +* `JSONv1` is `encoding/json` at `v1.18.2` +* `JSONv2` is `github.com/go-json-experiment/json` at `v0.0.0-20220524042235-dd8be80fc4a7` +* `JSONIterator` is `github.com/json-iterator/go` at `v1.1.12` +* `SegmentJSON` is `github.com/segmentio/encoding/json` at `v0.3.5` +* `GoJSON` is `github.com/goccy/go-json` at `v0.9.7` +* `SonicJSON` is `github.com/bytedance/sonic` at `v1.3.0` + +Benchmarks were run across various datasets: + +* `CanadaGeometry` is a GeoJSON (RFC 7946) representation of Canada. + It contains many JSON arrays of arrays of two-element arrays of numbers. +* `CITMCatalog` contains many JSON objects using numeric names. +* `SyntheaFHIR` is sample JSON data from the healthcare industry. + It contains many nested JSON objects with mostly string values, + where the set of unique string values is relatively small. +* `TwitterStatus` is the JSON response from the Twitter API. + It contains a mix of all different JSON kinds, where string values + are a mix of both single-byte ASCII and multi-byte Unicode. +* `GolangSource` is a simple tree representing the Go source code. + It contains many nested JSON objects, each with the same schema. +* `StringUnicode` contains many strings with multi-byte Unicode runes. + +All of the implementations other than `JSONv1` and `JSONv2` make +extensive use of `unsafe`. As such, we expect those to generally be faster, +but at the cost of memory and type safety. `SonicJSON` goes a step even further +and uses just-in-time compilation to generate machine code specialized +for the Go type being marshaled or unmarshaled. +Also, `SonicJSON` does not validate JSON strings for valid UTF-8, +and so gains a notable performance boost on datasets with multi-byte Unicode. +Benchmarks are performed based on the default marshal and unmarshal behavior +of each package. Note that `JSONv2` aims to be safe and correct by default, +which may not be the most performant strategy. + +`JSONv2` has several semantic changes relative to `JSONv1` that +impacts performance: + +1. When marshaling, `JSONv2` no longer sorts the keys of a Go map. + This will improve performance. +2. When marshaling or unmarshaling, `JSONv2` always checks + to make sure JSON object names are unique. + This will hurt performance, but is more correct. +3. When marshaling or unmarshaling, `JSONv2` always + shallow copies the underlying value for a Go interface and + shallow copies the key and value for entries in a Go map. + This is done to keep the value as addressable so that `JSONv2` can + call methods and functions that operate on a pointer receiver. + This will hurt performance, but is more correct. + +All of the charts are unit-less since the values are normalized +relative to `JSONv1`, which is why `JSONv1` always has a value of 1. +A lower value is better (i.e., runs faster). 
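+As a hedged illustration of the duplicate-name checking described in
+point 2 above (a sketch against the experimental API, whose import path
+and signatures may change):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/go-json-experiment/json"
+)
+
+func main() {
+	var v map[string]any
+	// v1 silently keeps one of the duplicates; v2 reports an error instead.
+	err := json.Unmarshal([]byte(`{"a":1,"a":2}`), &v)
+	fmt.Println(err != nil) // expected: true
+}
+```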
+
+Benchmarks were performed on an AMD Ryzen 9 5900X.
+
+The code for the benchmarks is located at
+https://github.com/go-json-experiment/jsonbench.
+
+### Marshal Performance
+
+#### Concrete types
+
+![Benchmark Marshal Concrete](benchmark-marshal-concrete.png)
+
+* This compares marshal performance when serializing
+  [from concrete types](/testdata_test.go).
+* The `JSONv1` implementation is close to optimal (without the use of `unsafe`).
+* Relative to `JSONv1`, `JSONv2` is generally as fast or slightly faster.
+* Relative to `JSONIterator`, `JSONv2` is up to 1.3x faster.
+* Relative to `SegmentJSON`, `JSONv2` is up to 1.8x slower.
+* Relative to `GoJSON`, `JSONv2` is up to 2.0x slower.
+* Relative to `SonicJSON`, `JSONv2` is about 1.8x to 3.2x slower
+  (ignoring `StringUnicode` since `SonicJSON` does not validate UTF-8).
+* For `JSONv1` and `JSONv2`, marshaling from concrete types is
+  mostly limited by the performance of Go reflection.
+
+#### Interface types
+
+![Benchmark Marshal Interface](benchmark-marshal-interface.png)
+
+* This compares marshal performance when serializing from
+  `any`, `map[string]any`, and `[]any` types.
+* Relative to `JSONv1`, `JSONv2` is about 1.5x to 4.2x faster.
+* Relative to `JSONIterator`, `JSONv2` is about 1.1x to 2.4x faster.
+* Relative to `SegmentJSON`, `JSONv2` is about 1.2x to 1.8x faster.
+* Relative to `GoJSON`, `JSONv2` is about 1.1x to 2.5x faster.
+* Relative to `SonicJSON`, `JSONv2` is up to 1.5x slower
+  (ignoring `StringUnicode` since `SonicJSON` does not validate UTF-8).
+* `JSONv2` is faster than the alternatives.
+  One advantage is that it does not sort the keys for a `map[string]any`,
+  while alternatives (except `SonicJSON` and `JSONIterator`) do sort the keys.
+
+#### RawValue types
+
+![Benchmark Marshal Rawvalue](benchmark-marshal-rawvalue.png)
+
+* This compares performance when marshaling from a `json.RawValue`.
+  This mostly exercises the underlying encoder and
+  hides the cost of Go reflection.
+* Relative to `JSONv1`, `JSONv2` is about 3.5x to 7.8x faster.
+* `JSONIterator` is blazingly fast because
+  [it does not validate whether the raw value is valid](https://go.dev/play/p/bun9IXQCKRe)
+  and simply copies it to the output.
+* Relative to `SegmentJSON`, `JSONv2` is about 1.5x to 2.7x faster.
+* Relative to `GoJSON`, `JSONv2` is up to 2.2x faster.
+* Relative to `SonicJSON`, `JSONv2` is up to 1.5x faster.
+* Aside from `JSONIterator`, `JSONv2` is generally the fastest.
+
+### Unmarshal Performance
+
+#### Concrete types
+
+![Benchmark Unmarshal Concrete](benchmark-unmarshal-concrete.png)
+
+* This compares unmarshal performance when deserializing
+  [into concrete types](/testdata_test.go).
+* Relative to `JSONv1`, `JSONv2` is about 1.8x to 5.7x faster.
+* Relative to `JSONIterator`, `JSONv2` is about 1.1x to 1.6x slower.
+* Relative to `SegmentJSON`, `JSONv2` is up to 2.5x slower.
+* Relative to `GoJSON`, `JSONv2` is about 1.4x to 2.1x slower.
+* Relative to `SonicJSON`, `JSONv2` is up to 4.0x slower
+  (ignoring `StringUnicode` since `SonicJSON` does not validate UTF-8).
+* For `JSONv1` and `JSONv2`, unmarshaling into concrete types is
+  mostly limited by the performance of Go reflection.
+
+#### Interface types
+
+![Benchmark Unmarshal Interface](benchmark-unmarshal-interface.png)
+
+* This compares unmarshal performance when deserializing into
+  `any`, `map[string]any`, and `[]any` types.
+* Relative to `JSONv1`, `JSONv2` is about 1.x to 4.3x faster.
+* Relative to `JSONIterator`, `JSONv2` is up to 1.5x faster.
+* Relative to `SegmentJSON`, `JSONv2` is about 1.5x to 3.7x faster.
+* Relative to `GoJSON`, `JSONv2` is up to 1.3x faster.
+* Relative to `SonicJSON`, `JSONv2` is up to 1.5x slower
+  (ignoring `StringUnicode` since `SonicJSON` does not validate UTF-8).
+* Aside from `SonicJSON`, `JSONv2` is generally just as fast
+  or faster than all the alternatives.
+
+#### RawValue types
+
+![Benchmark Unmarshal Rawvalue](benchmark-unmarshal-rawvalue.png)
+
+* This compares performance when unmarshaling into a `json.RawValue`.
+  This mostly exercises the underlying decoder and
+  hides away most of the cost of Go reflection.
+* Relative to `JSONv1`, `JSONv2` is about 8.3x to 17.0x faster.
+* Relative to `JSONIterator`, `JSONv2` is up to 2.0x faster.
+* Relative to `SegmentJSON`, `JSONv2` is up to 1.6x faster or 1.7x slower.
+* Relative to `GoJSON`, `JSONv2` is up to 1.9x faster or 2.1x slower.
+* Relative to `SonicJSON`, `JSONv2` is up to 2.0x faster
+  (ignoring `StringUnicode` since `SonicJSON` does not validate UTF-8).
+* `JSONv1` takes a
+  [lexical scanning approach](https://talks.golang.org/2011/lex.slide#1),
+  which performs a virtual function call for every byte of input.
+  In contrast, `JSONv2` makes heavy use of iterative and linear parsing logic
+  (with extra complexity to resume parsing when encountering segmented buffers).
+* `JSONv2` is comparable to the alternatives that use `unsafe`.
+  Generally it is faster, but sometimes it is slower.
diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go
new file mode 100644
index 000000000..e6c6216ff
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go
@@ -0,0 +1,513 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"errors"
+	"io"
+	"reflect"
+	"sync"
+)
+
+// MarshalOptions configures how Go data is serialized as JSON data.
+// The zero value is equivalent to the default marshal settings.
+type MarshalOptions struct {
+	requireKeyedLiterals
+	nonComparable
+
+	// Marshalers is a list of type-specific marshalers to use.
+	Marshalers *Marshalers
+
+	// StringifyNumbers specifies that numeric Go types should be serialized
+	// as a JSON string containing the equivalent JSON number value.
+	//
+	// According to RFC 8259, section 6, a JSON implementation may choose to
+	// limit the representation of a JSON number to an IEEE 754 binary64 value.
+	// This may cause decoders to lose precision for int64 and uint64 types.
+	// Escaping JSON numbers as a JSON string preserves the exact precision.
+	StringifyNumbers bool
+
+	// DiscardUnknownMembers specifies that marshaling should ignore any
+	// JSON object members stored in Go struct fields dedicated to storing
+	// unknown JSON object members.
+	DiscardUnknownMembers bool
+
+	// Deterministic specifies that the same input value will be serialized
+	// as the exact same output bytes. Different processes of
+	// the same program will serialize equal values to the same bytes,
+	// but different versions of the same program are not guaranteed
+	// to produce the exact same sequence of bytes.
+	Deterministic bool
+
+	// formatDepth is the depth at which we respect the format flag.
+	formatDepth int
+	// format is custom formatting for the value at the specified depth.
+	format string
+}
+
+// Marshal serializes a Go value as a []byte with default options.
+// It is a thin wrapper over MarshalOptions.Marshal.
+func Marshal(in any) (out []byte, err error) {
+	return MarshalOptions{}.Marshal(EncodeOptions{}, in)
+}
+
+// MarshalFull serializes a Go value into an io.Writer with default options.
+// It is a thin wrapper over MarshalOptions.MarshalFull.
+func MarshalFull(out io.Writer, in any) error {
+	return MarshalOptions{}.MarshalFull(EncodeOptions{}, out, in)
+}
+
+// Marshal serializes a Go value as a []byte according to the provided
+// marshal and encode options. It does not terminate the output with a newline.
+// See MarshalNext for details about the conversion of a Go value into JSON.
+func (mo MarshalOptions) Marshal(eo EncodeOptions, in any) (out []byte, err error) {
+	enc := getBufferedEncoder(eo)
+	defer putBufferedEncoder(enc)
+	enc.options.omitTopLevelNewline = true
+	err = mo.MarshalNext(enc, in)
+	// TODO(https://go.dev/issue/45038): Use bytes.Clone.
+	return append([]byte(nil), enc.buf...), err
+}
+
+// MarshalFull serializes a Go value into an io.Writer according to the provided
+// marshal and encode options. It does not terminate the output with a newline.
+// See MarshalNext for details about the conversion of a Go value into JSON.
+func (mo MarshalOptions) MarshalFull(eo EncodeOptions, out io.Writer, in any) error {
+	enc := getStreamingEncoder(out, eo)
+	defer putStreamingEncoder(enc)
+	enc.options.omitTopLevelNewline = true
+	err := mo.MarshalNext(enc, in)
+	return err
+}
+
+// MarshalNext encodes a Go value as the next JSON value according to
+// the provided marshal options.
+//
+// Type-specific marshal functions and methods take precedence
+// over the default representation of a value.
+// Functions or methods that operate on *T are only called when encoding
+// a value of type T (by taking its address) or a non-nil value of *T.
+// MarshalNext ensures that a value is always addressable
+// (by boxing it on the heap if necessary) so that
+// these functions and methods can be consistently called. For performance,
+// it is recommended that MarshalNext be passed a non-nil pointer to the value.
+//
+// The input value is encoded as JSON according to the following rules:
+//
+// - If any type-specific functions in MarshalOptions.Marshalers match
+// the value type, then those functions are called to encode the value.
+// If all applicable functions return SkipFunc,
+// then the value is encoded according to subsequent rules.
+//
+// - If the value type implements MarshalerV2,
+// then the MarshalNextJSON method is called to encode the value.
+//
+// - If the value type implements MarshalerV1,
+// then the MarshalJSON method is called to encode the value.
+//
+// - If the value type implements encoding.TextMarshaler,
+// then the MarshalText method is called to encode the value and
+// subsequently encode its result as a JSON string.
+//
+// - Otherwise, the value is encoded according to the value's type
+// as described in detail below.
+//
+// Most Go types have a default JSON representation.
+// Certain types support specialized formatting according to
+// a format flag optionally specified in the Go struct tag
+// for the struct field that contains the current value
+// (see the “JSON Representation of Go structs” section for more details).
+//
+// The representation of each type is as follows:
+//
+// - A Go boolean is encoded as a JSON boolean (e.g., true or false).
+// It does not support any custom format flags.
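+//
+// (Illustrative note, not upstream documentation: for example,
+// MarshalOptions{}.Marshal(EncodeOptions{}, true) emits the JSON
+// value true, since a Go bool has no custom formats.)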
+// +// - A Go string is encoded as a JSON string. +// It does not support any custom format flags. +// +// - A Go []byte or [N]byte is encoded as a JSON string containing +// the binary value encoded using RFC 4648. +// If the format is "base64" or unspecified, then this uses RFC 4648, section 4. +// If the format is "base64url", then this uses RFC 4648, section 5. +// If the format is "base32", then this uses RFC 4648, section 6. +// If the format is "base32hex", then this uses RFC 4648, section 7. +// If the format is "base16" or "hex", then this uses RFC 4648, section 8. +// If the format is "array", then the bytes value is encoded as a JSON array +// where each byte is recursively JSON-encoded as each JSON array element. +// +// - A Go integer is encoded as a JSON number without fractions or exponents. +// If MarshalOptions.StringifyNumbers is specified, then the JSON number is +// encoded within a JSON string. It does not support any custom format +// flags. +// +// - A Go float is encoded as a JSON number. +// If MarshalOptions.StringifyNumbers is specified, +// then the JSON number is encoded within a JSON string. +// If the format is "nonfinite", then NaN, +Inf, and -Inf are encoded as +// the JSON strings "NaN", "Infinity", and "-Infinity", respectively. +// Otherwise, the presence of non-finite numbers results in a SemanticError. +// +// - A Go map is encoded as a JSON object, where each Go map key and value +// is recursively encoded as a name and value pair in the JSON object. +// The Go map key must encode as a JSON string, otherwise this results +// in a SemanticError. When encoding keys, MarshalOptions.StringifyNumbers +// is automatically applied so that numeric keys encode as JSON strings. +// The Go map is traversed in a non-deterministic order. +// For deterministic encoding, consider using RawValue.Canonicalize. +// If the format is "emitnull", then a nil map is encoded as a JSON null. +// Otherwise by default, a nil map is encoded as an empty JSON object. +// +// - A Go struct is encoded as a JSON object. +// See the “JSON Representation of Go structs” section +// in the package-level documentation for more details. +// +// - A Go slice is encoded as a JSON array, where each Go slice element +// is recursively JSON-encoded as the elements of the JSON array. +// If the format is "emitnull", then a nil slice is encoded as a JSON null. +// Otherwise by default, a nil slice is encoded as an empty JSON array. +// +// - A Go array is encoded as a JSON array, where each Go array element +// is recursively JSON-encoded as the elements of the JSON array. +// The JSON array length is always identical to the Go array length. +// It does not support any custom format flags. +// +// - A Go pointer is encoded as a JSON null if nil, otherwise it is +// the recursively JSON-encoded representation of the underlying value. +// Format flags are forwarded to the encoding of the underlying value. +// +// - A Go interface is encoded as a JSON null if nil, otherwise it is +// the recursively JSON-encoded representation of the underlying value. +// It does not support any custom format flags. +// +// - A Go time.Time is encoded as a JSON string containing the timestamp +// formatted in RFC 3339 with nanosecond resolution. +// If the format matches one of the format constants declared +// in the time package (e.g., RFC1123), then that format is used. +// Otherwise, the format is used as-is with time.Time.Format if non-empty. 
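+//
+// (Illustrative sketch, not upstream documentation: assuming the struct-tag
+// format flag described above, a field declared as
+//
+//	At time.Time `json:",format:RFC1123"`
+//
+// would marshal as a JSON string in time.RFC1123 form.)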
+// +// - A Go time.Duration is encoded as a JSON string containing the duration +// formatted according to time.Duration.String. +// If the format is "nanos", it is encoded as a JSON number +// containing the number of nanoseconds in the duration. +// +// - All other Go types (e.g., complex numbers, channels, and functions) +// have no default representation and result in a SemanticError. +// +// JSON cannot represent cyclic data structures and +// MarshalNext does not handle them. +// Passing cyclic structures will result in an error. +func (mo MarshalOptions) MarshalNext(out *Encoder, in any) error { + v := reflect.ValueOf(in) + if !v.IsValid() || (v.Kind() == reflect.Pointer && v.IsNil()) { + return out.WriteToken(Null) + } + // Shallow copy non-pointer values to obtain an addressable value. + // It is beneficial to performance to always pass pointers to avoid this. + if v.Kind() != reflect.Pointer { + v2 := reflect.New(v.Type()) + v2.Elem().Set(v) + v = v2 + } + va := addressableValue{v.Elem()} // dereferenced pointer is always addressable + t := va.Type() + + // Lookup and call the marshal function for this type. + marshal := lookupArshaler(t).marshal + if mo.Marshalers != nil { + marshal, _ = mo.Marshalers.lookup(marshal, t) + } + if err := marshal(mo, out, va); err != nil { + if !out.options.AllowDuplicateNames { + out.tokens.invalidateDisabledNamespaces() + } + return err + } + return nil +} + +// UnmarshalOptions configures how JSON data is deserialized as Go data. +// The zero value is equivalent to the default unmarshal settings. +type UnmarshalOptions struct { + requireKeyedLiterals + nonComparable + + // Unmarshalers is a list of type-specific unmarshalers to use. + Unmarshalers *Unmarshalers + + // StringifyNumbers specifies that numeric Go types can be deserialized + // from either a JSON number or a JSON string containing a JSON number + // without any surrounding whitespace. + StringifyNumbers bool + + // RejectUnknownMembers specifies that unknown members should be rejected + // when unmarshaling a JSON object, regardless of whether there is a field + // to store unknown members. + RejectUnknownMembers bool + + // formatDepth is the depth at which we respect the format flag. + formatDepth int + // format is custom formatting for the value at the specified depth. + format string +} + +// Unmarshal deserializes a Go value from a []byte with default options. +// It is a thin wrapper over UnmarshalOptions.Unmarshal. +func Unmarshal(in []byte, out any) error { + return UnmarshalOptions{}.Unmarshal(DecodeOptions{}, in, out) +} + +// UnmarshalFull deserializes a Go value from an io.Reader with default options. +// It is a thin wrapper over UnmarshalOptions.UnmarshalFull. +func UnmarshalFull(in io.Reader, out any) error { + return UnmarshalOptions{}.UnmarshalFull(DecodeOptions{}, in, out) +} + +// Unmarshal deserializes a Go value from a []byte according to the +// provided unmarshal and decode options. The output must be a non-nil pointer. +// The input must be a single JSON value with optional whitespace interspersed. +// See UnmarshalNext for details about the conversion of JSON into a Go value. +func (uo UnmarshalOptions) Unmarshal(do DecodeOptions, in []byte, out any) error { + dec := getBufferedDecoder(in, do) + defer putBufferedDecoder(dec) + return uo.unmarshalFull(dec, out) +} + +// UnmarshalFull deserializes a Go value from an io.Reader according to the +// provided unmarshal and decode options. The output must be a non-nil pointer. 
+// The input must be a single JSON value with optional whitespace interspersed.
+// It consumes the entirety of io.Reader until io.EOF is encountered.
+// See UnmarshalNext for details about the conversion of JSON into a Go value.
+func (uo UnmarshalOptions) UnmarshalFull(do DecodeOptions, in io.Reader, out any) error {
+	dec := getStreamingDecoder(in, do)
+	defer putStreamingDecoder(dec)
+	return uo.unmarshalFull(dec, out)
+}
+func (uo UnmarshalOptions) unmarshalFull(in *Decoder, out any) error {
+	switch err := uo.UnmarshalNext(in, out); err {
+	case nil:
+		return in.checkEOF()
+	case io.EOF:
+		return io.ErrUnexpectedEOF
+	default:
+		return err
+	}
+}
+
+// UnmarshalNext decodes the next JSON value into a Go value according to
+// the provided unmarshal options. The output must be a non-nil pointer.
+//
+// Type-specific unmarshal functions and methods take precedence
+// over the default representation of a value.
+// Functions or methods that operate on *T are only called when decoding
+// a value of type T (by taking its address) or a non-nil value of *T.
+// UnmarshalNext ensures that a value is always addressable
+// (by boxing it on the heap if necessary) so that
+// these functions and methods can be consistently called.
+//
+// The input is decoded into the output according to the following rules:
+//
+// - If any type-specific functions in UnmarshalOptions.Unmarshalers match
+// the value type, then those functions are called to decode the JSON
+// value. If all applicable functions return SkipFunc,
+// then the input is decoded according to subsequent rules.
+//
+// - If the value type implements UnmarshalerV2,
+// then the UnmarshalNextJSON method is called to decode the JSON value.
+//
+// - If the value type implements UnmarshalerV1,
+// then the UnmarshalJSON method is called to decode the JSON value.
+//
+// - If the value type implements encoding.TextUnmarshaler,
+// then the input is decoded as a JSON string and
+// the UnmarshalText method is called with the decoded string value.
+// This fails with a SemanticError if the input is not a JSON string.
+//
+// - Otherwise, the JSON value is decoded according to the value's type
+// as described in detail below.
+//
+// Most Go types have a default JSON representation.
+// Certain types support specialized formatting according to
+// a format flag optionally specified in the Go struct tag
+// for the struct field that contains the current value
+// (see the “JSON Representation of Go structs” section for more details).
+// A JSON null may be decoded into every supported Go value where
+// it is equivalent to storing the zero value of the Go value.
+// If the input JSON kind is not handled by the current Go value type,
+// then this fails with a SemanticError. Unless otherwise specified,
+// the decoded value replaces any pre-existing value.
+//
+// The representation of each type is as follows:
+//
+// - A Go boolean is decoded from a JSON boolean (e.g., true or false).
+// It does not support any custom format flags.
+//
+// - A Go string is decoded from a JSON string.
+// It does not support any custom format flags.
+//
+// - A Go []byte or [N]byte is decoded from a JSON string
+// containing the binary value encoded using RFC 4648.
+// If the format is "base64" or unspecified, then this uses RFC 4648, section 4.
+// If the format is "base64url", then this uses RFC 4648, section 5.
+// If the format is "base32", then this uses RFC 4648, section 6.
+// If the format is "base32hex", then this uses RFC 4648, section 7.
+// If the format is "base16" or "hex", then this uses RFC 4648, section 8. +// If the format is "array", then the Go slice or array is decoded from a +// JSON array where each JSON element is recursively decoded for each byte. +// When decoding into a non-nil []byte, the slice length is reset to zero +// and the decoded input is appended to it. +// When decoding into a [N]byte, the input must decode to exactly N bytes, +// otherwise it fails with a SemanticError. +// +// - A Go integer is decoded from a JSON number. +// It may also be decoded from a JSON string containing a JSON number +// if UnmarshalOptions.StringifyNumbers is specified. +// It fails with a SemanticError if the JSON number +// has a fractional or exponent component. +// It also fails if it overflows the representation of the Go integer type. +// It does not support any custom format flags. +// +// - A Go float is decoded from a JSON number. +// It may also be decoded from a JSON string containing a JSON number +// if UnmarshalOptions.StringifyNumbers is specified. +// The JSON number is parsed as the closest representable Go float value. +// If the format is "nonfinite", then the JSON strings +// "NaN", "Infinity", and "-Infinity" are decoded as NaN, +Inf, and -Inf. +// Otherwise, the presence of such strings results in a SemanticError. +// +// - A Go map is decoded from a JSON object, +// where each JSON object name and value pair is recursively decoded +// as the Go map key and value. When decoding keys, +// UnmarshalOptions.StringifyNumbers is automatically applied so that +// numeric keys can decode from JSON strings. Maps are not cleared. +// If the Go map is nil, then a new map is allocated to decode into. +// If the decoded key matches an existing Go map entry, the entry value +// is reused by decoding the JSON object value into it. +// The only supported format is "emitnull" and has no effect when decoding. +// +// - A Go struct is decoded from a JSON object. +// See the “JSON Representation of Go structs” section +// in the package-level documentation for more details. +// +// - A Go slice is decoded from a JSON array, where each JSON element +// is recursively decoded and appended to the Go slice. +// Before appending into a Go slice, a new slice is allocated if it is nil, +// otherwise the slice length is reset to zero. +// The only supported format is "emitnull" and has no effect when decoding. +// +// - A Go array is decoded from a JSON array, where each JSON array element +// is recursively decoded as each corresponding Go array element. +// Each Go array element is zeroed before decoding into it. +// It fails with a SemanticError if the JSON array does not contain +// the exact same number of elements as the Go array. +// It does not support any custom format flags. +// +// - A Go pointer is decoded based on the JSON kind and underlying Go type. +// If the input is a JSON null, then this stores a nil pointer. +// Otherwise, it allocates a new underlying value if the pointer is nil, +// and recursively JSON decodes into the underlying value. +// Format flags are forwarded to the decoding of the underlying type. +// +// - A Go interface is decoded based on the JSON kind and underlying Go type. +// If the input is a JSON null, then this stores a nil interface value. +// Otherwise, a nil interface value of an empty interface type is initialized +// with a zero Go bool, string, float64, map[string]any, or []any if the +// input is a JSON boolean, string, number, object, or array, respectively. 
+// If the interface value is still nil, then this fails with a SemanticError +// since decoding could not determine an appropriate Go type to decode into. +// For example, unmarshaling into a nil io.Reader fails since +// there is no concrete type to populate the interface value with. +// Otherwise an underlying value exists and it recursively decodes +// the JSON input into it. It does not support any custom format flags. +// +// - A Go time.Time is decoded from a JSON string containing the time +// formatted in RFC 3339 with nanosecond resolution. +// If the format matches one of the format constants declared in +// the time package (e.g., RFC1123), then that format is used for parsing. +// Otherwise, the format is used as-is with time.Time.Parse if non-empty. +// +// - A Go time.Duration is decoded from a JSON string by +// passing the decoded string to time.ParseDuration. +// If the format is "nanos", it is instead decoded from a JSON number +// containing the number of nanoseconds in the duration. +// +// - All other Go types (e.g., complex numbers, channels, and functions) +// have no default representation and result in a SemanticError. +// +// In general, unmarshaling follows merge semantics (similar to RFC 7396) +// where the decoded Go value replaces the destination value +// for any JSON kind other than an object. +// For JSON objects, the input object is merged into the destination value +// where matching object members recursively apply merge semantics. +func (uo UnmarshalOptions) UnmarshalNext(in *Decoder, out any) error { + v := reflect.ValueOf(out) + if !v.IsValid() || v.Kind() != reflect.Pointer || v.IsNil() { + var t reflect.Type + if v.IsValid() { + t = v.Type() + if t.Kind() == reflect.Pointer { + t = t.Elem() + } + } + err := errors.New("value must be passed as a non-nil pointer reference") + return &SemanticError{action: "unmarshal", GoType: t, Err: err} + } + va := addressableValue{v.Elem()} // dereferenced pointer is always addressable + t := va.Type() + + // Lookup and call the unmarshal function for this type. + unmarshal := lookupArshaler(t).unmarshal + if uo.Unmarshalers != nil { + unmarshal, _ = uo.Unmarshalers.lookup(unmarshal, t) + } + if err := unmarshal(uo, in, va); err != nil { + if !in.options.AllowDuplicateNames { + in.tokens.invalidateDisabledNamespaces() + } + return err + } + return nil +} + +// addressableValue is a reflect.Value that is guaranteed to be addressable +// such that calling the Addr and Set methods do not panic. +// +// There is no compile magic that enforces this property, +// but rather the need to construct this type makes it easier to examine each +// construction site to ensure that this property is upheld. +type addressableValue struct{ reflect.Value } + +// newAddressableValue constructs a new addressable value of type t. +func newAddressableValue(t reflect.Type) addressableValue { + return addressableValue{reflect.New(t).Elem()} +} + +// All marshal and unmarshal behavior is implemented using these signatures. 
+type (
+	marshaler   = func(MarshalOptions, *Encoder, addressableValue) error
+	unmarshaler = func(UnmarshalOptions, *Decoder, addressableValue) error
+)
+
+type arshaler struct {
+	marshal    marshaler
+	unmarshal  unmarshaler
+	nonDefault bool
+}
+
+var lookupArshalerCache sync.Map // map[reflect.Type]*arshaler
+
+func lookupArshaler(t reflect.Type) *arshaler {
+	if v, ok := lookupArshalerCache.Load(t); ok {
+		return v.(*arshaler)
+	}
+
+	fncs := makeDefaultArshaler(t)
+	fncs = makeMethodArshaler(fncs, t)
+	fncs = makeTimeArshaler(fncs, t)
+
+	// Use the last stored so that duplicate arshalers can be garbage collected.
+	v, _ := lookupArshalerCache.LoadOrStore(t, fncs)
+	return v.(*arshaler)
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go
new file mode 100644
index 000000000..c62b1f320
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go
@@ -0,0 +1,238 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import "reflect"
+
+// This file contains an optimized marshal and unmarshal implementation
+// for the any type. This type is often used when the Go program has
+// no knowledge of the JSON schema. This is a common enough occurrence
+// to justify the complexity of adding logic for this.
+
+func marshalValueAny(mo MarshalOptions, enc *Encoder, val any) error {
+	switch val := val.(type) {
+	case nil:
+		return enc.WriteToken(Null)
+	case bool:
+		return enc.WriteToken(Bool(val))
+	case string:
+		return enc.WriteToken(String(val))
+	case float64:
+		return enc.WriteToken(Float(val))
+	case map[string]any:
+		return marshalObjectAny(mo, enc, val)
+	case []any:
+		return marshalArrayAny(mo, enc, val)
+	default:
+		v := newAddressableValue(reflect.TypeOf(val))
+		v.Set(reflect.ValueOf(val))
+		marshal := lookupArshaler(v.Type()).marshal
+		if mo.Marshalers != nil {
+			marshal, _ = mo.Marshalers.lookup(marshal, v.Type())
+		}
+		return marshal(mo, enc, v)
+	}
+}
+
+func unmarshalValueAny(uo UnmarshalOptions, dec *Decoder) (any, error) {
+	switch k := dec.PeekKind(); k {
+	case '{':
+		return unmarshalObjectAny(uo, dec)
+	case '[':
+		return unmarshalArrayAny(uo, dec)
+	default:
+		var flags valueFlags
+		val, err := dec.readValue(&flags)
+		if err != nil {
+			return nil, err
+		}
+		switch val.Kind() {
+		case 'n':
+			return nil, nil
+		case 'f':
+			return false, nil
+		case 't':
+			return true, nil
+		case '"':
+			val = unescapeStringMayCopy(val, flags.isVerbatim())
+			if dec.stringCache == nil {
+				dec.stringCache = new(stringCache)
+			}
+			return dec.stringCache.make(val), nil
+		case '0':
+			fv, _ := parseFloat(val, 64) // ignore error since readValue guarantees val is valid
+			return fv, nil
+		default:
+			panic("BUG: invalid kind: " + k.String())
+		}
+	}
+}
+
+func marshalObjectAny(mo MarshalOptions, enc *Encoder, obj map[string]any) error {
+	// Check for cycles.
+	if enc.tokens.depth() > startDetectingCyclesAfter {
+		v := reflect.ValueOf(obj)
+		if err := enc.seenPointers.visit(v); err != nil {
+			return err
+		}
+		defer enc.seenPointers.leave(v)
+	}
+
+	// Optimize for marshaling an empty map without any preceding whitespace.
+ if len(obj) == 0 && !enc.options.multiline && !enc.tokens.last.needObjectName() { + enc.buf = enc.tokens.mayAppendDelim(enc.buf, '{') + enc.buf = append(enc.buf, "{}"...) + enc.tokens.last.increment() + if enc.needFlush() { + return enc.flush() + } + return nil + } + + if err := enc.WriteToken(ObjectStart); err != nil { + return err + } + // A Go map guarantees that each entry has a unique key + // The only possibility of duplicates is due to invalid UTF-8. + if !enc.options.AllowInvalidUTF8 { + enc.tokens.last.disableNamespace() + } + if !mo.Deterministic || len(obj) <= 1 { + for name, val := range obj { + if err := enc.WriteToken(String(name)); err != nil { + return err + } + if err := marshalValueAny(mo, enc, val); err != nil { + return err + } + } + } else { + names := getStrings(len(obj)) + var i int + for name := range obj { + (*names)[i] = name + i++ + } + names.Sort() + for _, name := range *names { + if err := enc.WriteToken(String(name)); err != nil { + return err + } + if err := marshalValueAny(mo, enc, obj[name]); err != nil { + return err + } + } + putStrings(names) + } + if err := enc.WriteToken(ObjectEnd); err != nil { + return err + } + return nil +} + +func unmarshalObjectAny(uo UnmarshalOptions, dec *Decoder) (map[string]any, error) { + tok, err := dec.ReadToken() + if err != nil { + return nil, err + } + k := tok.Kind() + switch k { + case 'n': + return nil, nil + case '{': + obj := make(map[string]any) + // A Go map guarantees that each entry has a unique key + // The only possibility of duplicates is due to invalid UTF-8. + if !dec.options.AllowInvalidUTF8 { + dec.tokens.last.disableNamespace() + } + for dec.PeekKind() != '}' { + tok, err := dec.ReadToken() + if err != nil { + return obj, err + } + name := tok.String() + + // Manually check for duplicate names. + if _, ok := obj[name]; ok { + name := dec.previousBuffer() + err := &SyntacticError{str: "duplicate name " + string(name) + " in object"} + return obj, err.withOffset(dec.InputOffset() - int64(len(name))) + } + + val, err := unmarshalValueAny(uo, dec) + obj[name] = val + if err != nil { + return obj, err + } + } + if _, err := dec.ReadToken(); err != nil { + return obj, err + } + return obj, nil + } + return nil, &SemanticError{action: "unmarshal", JSONKind: k, GoType: mapStringAnyType} +} + +func marshalArrayAny(mo MarshalOptions, enc *Encoder, arr []any) error { + // Check for cycles. + if enc.tokens.depth() > startDetectingCyclesAfter { + v := reflect.ValueOf(arr) + if err := enc.seenPointers.visit(v); err != nil { + return err + } + defer enc.seenPointers.leave(v) + } + + // Optimize for marshaling an empty slice without any preceding whitespace. + if len(arr) == 0 && !enc.options.multiline && !enc.tokens.last.needObjectName() { + enc.buf = enc.tokens.mayAppendDelim(enc.buf, '[') + enc.buf = append(enc.buf, "[]"...) 
+		enc.tokens.last.increment()
+		if enc.needFlush() {
+			return enc.flush()
+		}
+		return nil
+	}
+
+	if err := enc.WriteToken(ArrayStart); err != nil {
+		return err
+	}
+	for _, val := range arr {
+		if err := marshalValueAny(mo, enc, val); err != nil {
+			return err
+		}
+	}
+	if err := enc.WriteToken(ArrayEnd); err != nil {
+		return err
+	}
+	return nil
+}
+
+func unmarshalArrayAny(uo UnmarshalOptions, dec *Decoder) ([]any, error) {
+	tok, err := dec.ReadToken()
+	if err != nil {
+		return nil, err
+	}
+	k := tok.Kind()
+	switch k {
+	case 'n':
+		return nil, nil
+	case '[':
+		arr := []any{}
+		for dec.PeekKind() != ']' {
+			val, err := unmarshalValueAny(uo, dec)
+			arr = append(arr, val)
+			if err != nil {
+				return arr, err
+			}
+		}
+		if _, err := dec.ReadToken(); err != nil {
+			return arr, err
+		}
+		return arr, nil
+	}
+	return nil, &SemanticError{action: "unmarshal", JSONKind: k, GoType: sliceAnyType}
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go
new file mode 100644
index 000000000..fd26eba35
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go
@@ -0,0 +1,1485 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"bytes"
+	"encoding/base32"
+	"encoding/base64"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"sync"
+)
+
+// optimizeCommon specifies whether to use optimizations targeted for certain
+// common patterns, rather than using the slower, but more general logic.
+// All tests should pass regardless of whether this is true or not.
+const optimizeCommon = true
+
+var (
+	// Most natural Go types that correspond with each JSON type.
+	anyType          = reflect.TypeOf((*any)(nil)).Elem()            // JSON value
+	boolType         = reflect.TypeOf((*bool)(nil)).Elem()           // JSON bool
+	stringType       = reflect.TypeOf((*string)(nil)).Elem()         // JSON string
+	float64Type      = reflect.TypeOf((*float64)(nil)).Elem()        // JSON number
+	mapStringAnyType = reflect.TypeOf((*map[string]any)(nil)).Elem() // JSON object
+	sliceAnyType     = reflect.TypeOf((*[]any)(nil)).Elem()          // JSON array
+
+	bytesType       = reflect.TypeOf((*[]byte)(nil)).Elem()
+	emptyStructType = reflect.TypeOf((*struct{})(nil)).Elem()
+)
+
+const startDetectingCyclesAfter = 1000
+
+type seenPointers map[typedPointer]struct{}
+
+type typedPointer struct {
+	typ reflect.Type
+	ptr any // always stores unsafe.Pointer, but avoids depending on unsafe
+}
+
+// visit visits pointer p of type t, reporting an error if seen before.
+// If successfully visited, then the caller must eventually call leave.
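+// For example, marshaling a self-referential value such as
+//
+//	type Node struct{ Next *Node }
+//	n := &Node{}
+//	n.Next = n
+//
+// eventually exceeds startDetectingCyclesAfter and fails with an
+// "encountered a cycle" SemanticError instead of recursing forever.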
+func (m *seenPointers) visit(v reflect.Value) error { + p := typedPointer{v.Type(), v.UnsafePointer()} + if _, ok := (*m)[p]; ok { + return &SemanticError{action: "marshal", GoType: p.typ, Err: errors.New("encountered a cycle")} + } + if *m == nil { + *m = make(map[typedPointer]struct{}) + } + (*m)[p] = struct{}{} + return nil +} +func (m *seenPointers) leave(v reflect.Value) { + p := typedPointer{v.Type(), v.UnsafePointer()} + delete(*m, p) +} + +func makeDefaultArshaler(t reflect.Type) *arshaler { + switch t.Kind() { + case reflect.Bool: + return makeBoolArshaler(t) + case reflect.String: + return makeStringArshaler(t) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return makeIntArshaler(t) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return makeUintArshaler(t) + case reflect.Float32, reflect.Float64: + return makeFloatArshaler(t) + case reflect.Map: + return makeMapArshaler(t) + case reflect.Struct: + return makeStructArshaler(t) + case reflect.Slice: + fncs := makeSliceArshaler(t) + if t.AssignableTo(bytesType) { + return makeBytesArshaler(t, fncs) + } + return fncs + case reflect.Array: + fncs := makeArrayArshaler(t) + if reflect.SliceOf(t.Elem()).AssignableTo(bytesType) { + return makeBytesArshaler(t, fncs) + } + return fncs + case reflect.Pointer: + return makePointerArshaler(t) + case reflect.Interface: + return makeInterfaceArshaler(t) + default: + return makeInvalidArshaler(t) + } +} + +func makeBoolArshaler(t reflect.Type) *arshaler { + var fncs arshaler + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + return newInvalidFormatError("marshal", t, mo.format) + } + + // Optimize for marshaling without preceding whitespace. + if optimizeCommon && !enc.options.multiline && !enc.tokens.last.needObjectName() { + enc.buf = enc.tokens.mayAppendDelim(enc.buf, 't') + if va.Bool() { + enc.buf = append(enc.buf, "true"...) + } else { + enc.buf = append(enc.buf, "false"...) 
+ } + enc.tokens.last.increment() + if enc.needFlush() { + return enc.flush() + } + return nil + } + + return enc.WriteToken(Bool(va.Bool())) + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + return newInvalidFormatError("unmarshal", t, uo.format) + } + tok, err := dec.ReadToken() + if err != nil { + return err + } + k := tok.Kind() + switch k { + case 'n': + va.SetBool(false) + return nil + case 't', 'f': + va.SetBool(tok.Bool()) + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +func makeStringArshaler(t reflect.Type) *arshaler { + var fncs arshaler + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + return newInvalidFormatError("marshal", t, mo.format) + } + return enc.WriteToken(String(va.String())) + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + return newInvalidFormatError("unmarshal", t, uo.format) + } + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return err + } + k := val.Kind() + switch k { + case 'n': + va.SetString("") + return nil + case '"': + val = unescapeStringMayCopy(val, flags.isVerbatim()) + if dec.stringCache == nil { + dec.stringCache = new(stringCache) + } + str := dec.stringCache.make(val) + va.SetString(str) + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +var ( + encodeBase16 = func(dst, src []byte) { hex.Encode(dst, src) } + encodeBase32 = base32.StdEncoding.Encode + encodeBase32Hex = base32.HexEncoding.Encode + encodeBase64 = base64.StdEncoding.Encode + encodeBase64URL = base64.URLEncoding.Encode + encodedLenBase16 = hex.EncodedLen + encodedLenBase32 = base32.StdEncoding.EncodedLen + encodedLenBase32Hex = base32.HexEncoding.EncodedLen + encodedLenBase64 = base64.StdEncoding.EncodedLen + encodedLenBase64URL = base64.URLEncoding.EncodedLen + decodeBase16 = hex.Decode + decodeBase32 = base32.StdEncoding.Decode + decodeBase32Hex = base32.HexEncoding.Decode + decodeBase64 = base64.StdEncoding.Decode + decodeBase64URL = base64.URLEncoding.Decode + decodedLenBase16 = hex.DecodedLen + decodedLenBase32 = base32.StdEncoding.WithPadding(base32.NoPadding).DecodedLen + decodedLenBase32Hex = base32.HexEncoding.WithPadding(base32.NoPadding).DecodedLen + decodedLenBase64 = base64.StdEncoding.WithPadding(base64.NoPadding).DecodedLen + decodedLenBase64URL = base64.URLEncoding.WithPadding(base64.NoPadding).DecodedLen +) + +func makeBytesArshaler(t reflect.Type, fncs *arshaler) *arshaler { + // NOTE: This handles both []byte and [N]byte. 
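+	// (Illustrative note: given the format flags handled below, a field
+	// declared as
+	//
+	//	Data []byte `json:",format:base64url"`
+	//
+	// uses the RFC 4648, section 5 alphabet instead of standard base64;
+	// the tag syntax is assumed from the package documentation.)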
+ marshalDefault := fncs.marshal + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + encode, encodedLen := encodeBase64, encodedLenBase64 + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + switch mo.format { + case "base64": + encode, encodedLen = encodeBase64, encodedLenBase64 + case "base64url": + encode, encodedLen = encodeBase64URL, encodedLenBase64URL + case "base32": + encode, encodedLen = encodeBase32, encodedLenBase32 + case "base32hex": + encode, encodedLen = encodeBase32Hex, encodedLenBase32Hex + case "base16", "hex": + encode, encodedLen = encodeBase16, encodedLenBase16 + case "array": + mo.format = "" + return marshalDefault(mo, enc, va) + default: + return newInvalidFormatError("marshal", t, mo.format) + } + } + val := enc.UnusedBuffer() + b := va.Bytes() + n := len(`"`) + encodedLen(len(b)) + len(`"`) + if cap(val) < n { + val = make([]byte, n) + } else { + val = val[:n] + } + val[0] = '"' + encode(val[len(`"`):len(val)-len(`"`)], b) + val[len(val)-1] = '"' + return enc.WriteValue(val) + } + unmarshalDefault := fncs.unmarshal + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + decode, decodedLen, encodedLen := decodeBase64, decodedLenBase64, encodedLenBase64 + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + switch uo.format { + case "base64": + decode, decodedLen, encodedLen = decodeBase64, decodedLenBase64, encodedLenBase64 + case "base64url": + decode, decodedLen, encodedLen = decodeBase64URL, decodedLenBase64URL, encodedLenBase64URL + case "base32": + decode, decodedLen, encodedLen = decodeBase32, decodedLenBase32, encodedLenBase32 + case "base32hex": + decode, decodedLen, encodedLen = decodeBase32Hex, decodedLenBase32Hex, encodedLenBase32Hex + case "base16", "hex": + decode, decodedLen, encodedLen = decodeBase16, decodedLenBase16, encodedLenBase16 + case "array": + uo.format = "" + return unmarshalDefault(uo, dec, va) + default: + return newInvalidFormatError("unmarshal", t, uo.format) + } + } + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return err + } + k := val.Kind() + switch k { + case 'n': + va.Set(reflect.Zero(t)) + return nil + case '"': + val = unescapeStringMayCopy(val, flags.isVerbatim()) + + // For base64 and base32, decodedLen computes the maximum output size + // when given the original input size. To compute the exact size, + // adjust the input size by excluding trailing padding characters. + // This is unnecessary for base16, but also harmless. + n := len(val) + for n > 0 && val[n-1] == '=' { + n-- + } + n = decodedLen(n) + b := va.Bytes() + if va.Kind() == reflect.Array { + if n != len(b) { + err := fmt.Errorf("decoded base64 length of %d mismatches array length of %d", n, len(b)) + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + } else { + if b == nil || cap(b) < n { + b = make([]byte, n) + } else { + b = b[:n] + } + } + n2, err := decode(b, val) + if err == nil && len(val) != encodedLen(n2) { + // TODO(https://go.dev/issue/53845): RFC 4648, section 3.3, + // specifies that non-alphabet characters must be rejected. + // Unfortunately, the "base32" and "base64" packages allow + // '\r' and '\n' characters by default. 
+ err = errors.New("illegal data at input byte " + strconv.Itoa(bytes.IndexAny(val, "\r\n"))) + } + if err != nil { + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + if va.Kind() == reflect.Slice { + va.SetBytes(b) + } + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return fncs +} + +func makeIntArshaler(t reflect.Type) *arshaler { + var fncs arshaler + bits := t.Bits() + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + return newInvalidFormatError("marshal", t, mo.format) + } + + // Optimize for marshaling without preceding whitespace or string escaping. + if optimizeCommon && !enc.options.multiline && !mo.StringifyNumbers && !enc.tokens.last.needObjectName() { + enc.buf = enc.tokens.mayAppendDelim(enc.buf, '0') + enc.buf = strconv.AppendInt(enc.buf, va.Int(), 10) + enc.tokens.last.increment() + if enc.needFlush() { + return enc.flush() + } + return nil + } + + x := math.Float64frombits(uint64(va.Int())) + return enc.writeNumber(x, rawIntNumber, mo.StringifyNumbers) + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + return newInvalidFormatError("unmarshal", t, uo.format) + } + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return err + } + k := val.Kind() + switch k { + case 'n': + va.SetInt(0) + return nil + case '"': + if !uo.StringifyNumbers { + break + } + val = unescapeStringMayCopy(val, flags.isVerbatim()) + fallthrough + case '0': + var negOffset int + neg := val[0] == '-' + if neg { + negOffset = 1 + } + n, ok := parseDecUint(val[negOffset:]) + maxInt := uint64(1) << (bits - 1) + overflow := (neg && n > maxInt) || (!neg && n > maxInt-1) + if !ok { + if n != math.MaxUint64 { + err := fmt.Errorf("cannot parse %q as signed integer: %w", val, strconv.ErrSyntax) + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + overflow = true + } + if overflow { + err := fmt.Errorf("cannot parse %q as signed integer: %w", val, strconv.ErrRange) + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + if neg { + va.SetInt(int64(-n)) + } else { + va.SetInt(int64(+n)) + } + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +func makeUintArshaler(t reflect.Type) *arshaler { + var fncs arshaler + bits := t.Bits() + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + return newInvalidFormatError("marshal", t, mo.format) + } + + // Optimize for marshaling without preceding whitespace or string escaping. 
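+		// (Illustrative note: on this path a value such as uint64(42) is
+		// appended to the buffer directly as the digits 42 by
+		// strconv.AppendUint below.)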
+ if optimizeCommon && !enc.options.multiline && !mo.StringifyNumbers && !enc.tokens.last.needObjectName() { + enc.buf = enc.tokens.mayAppendDelim(enc.buf, '0') + enc.buf = strconv.AppendUint(enc.buf, va.Uint(), 10) + enc.tokens.last.increment() + if enc.needFlush() { + return enc.flush() + } + return nil + } + + x := math.Float64frombits(va.Uint()) + return enc.writeNumber(x, rawUintNumber, mo.StringifyNumbers) + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + return newInvalidFormatError("unmarshal", t, uo.format) + } + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return err + } + k := val.Kind() + switch k { + case 'n': + va.SetUint(0) + return nil + case '"': + if !uo.StringifyNumbers { + break + } + val = unescapeStringMayCopy(val, flags.isVerbatim()) + fallthrough + case '0': + n, ok := parseDecUint(val) + maxUint := uint64(1) << bits + overflow := n > maxUint-1 + if !ok { + if n != math.MaxUint64 { + err := fmt.Errorf("cannot parse %q as unsigned integer: %w", val, strconv.ErrSyntax) + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + overflow = true + } + if overflow { + err := fmt.Errorf("cannot parse %q as unsigned integer: %w", val, strconv.ErrRange) + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + va.SetUint(n) + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +func makeFloatArshaler(t reflect.Type) *arshaler { + var fncs arshaler + bits := t.Bits() + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + var allowNonFinite bool + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + if mo.format == "nonfinite" { + allowNonFinite = true + } else { + return newInvalidFormatError("marshal", t, mo.format) + } + } + + fv := va.Float() + if math.IsNaN(fv) || math.IsInf(fv, 0) { + if !allowNonFinite { + err := fmt.Errorf("invalid value: %v", fv) + return &SemanticError{action: "marshal", GoType: t, Err: err} + } + return enc.WriteToken(Float(fv)) + } + + // Optimize for marshaling without preceding whitespace or string escaping. 
+ if optimizeCommon && !enc.options.multiline && !mo.StringifyNumbers && !enc.tokens.last.needObjectName() { + enc.buf = enc.tokens.mayAppendDelim(enc.buf, '0') + enc.buf = appendNumber(enc.buf, fv, bits) + enc.tokens.last.increment() + if enc.needFlush() { + return enc.flush() + } + return nil + } + + return enc.writeNumber(fv, bits, mo.StringifyNumbers) + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + var allowNonFinite bool + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + if uo.format == "nonfinite" { + allowNonFinite = true + } else { + return newInvalidFormatError("unmarshal", t, uo.format) + } + } + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return err + } + k := val.Kind() + switch k { + case 'n': + va.SetFloat(0) + return nil + case '"': + val = unescapeStringMayCopy(val, flags.isVerbatim()) + if allowNonFinite { + switch string(val) { + case "NaN": + va.SetFloat(math.NaN()) + return nil + case "Infinity": + va.SetFloat(math.Inf(+1)) + return nil + case "-Infinity": + va.SetFloat(math.Inf(-1)) + return nil + } + } + if !uo.StringifyNumbers { + break + } + if n, err := consumeNumber(val); n != len(val) || err != nil { + err := fmt.Errorf("cannot parse %q as JSON number: %w", val, strconv.ErrSyntax) + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + fallthrough + case '0': + // NOTE: Floating-point parsing is by nature a lossy operation. + // We never report an overflow condition since we can always + // round the input to the closest representable finite value. + // For extremely large numbers, the closest value is ±MaxFloat. + fv, _ := parseFloat(val, bits) + va.SetFloat(fv) + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +func makeMapArshaler(t reflect.Type) *arshaler { + // NOTE: The logic below disables namespaces for tracking duplicate names + // when handling map keys with a unique representation. + + // NOTE: Values retrieved from a map are not addressable, + // so we shallow copy the values to make them addressable and + // store them back into the map afterwards. + + var fncs arshaler + var ( + once sync.Once + keyFncs *arshaler + valFncs *arshaler + ) + init := func() { + keyFncs = lookupArshaler(t.Key()) + valFncs = lookupArshaler(t.Elem()) + } + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + // Check for cycles. + if enc.tokens.depth() > startDetectingCyclesAfter { + if err := enc.seenPointers.visit(va.Value); err != nil { + return err + } + defer enc.seenPointers.leave(va.Value) + } + + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + if mo.format == "emitnull" { + if va.IsNil() { + return enc.WriteToken(Null) + } + mo.format = "" + } else { + return newInvalidFormatError("marshal", t, mo.format) + } + } + + // Optimize for marshaling an empty map without any preceding whitespace. + n := va.Len() + if optimizeCommon && n == 0 && !enc.options.multiline && !enc.tokens.last.needObjectName() { + enc.buf = enc.tokens.mayAppendDelim(enc.buf, '{') + enc.buf = append(enc.buf, "{}"...) + enc.tokens.last.increment() + if enc.needFlush() { + return enc.flush() + } + return nil + } + + once.Do(init) + if err := enc.WriteToken(ObjectStart); err != nil { + return err + } + if n > 0 { + // Handle maps with numeric key types by stringifying them. 
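+			// (Illustrative note: with this, map[int]string{1: "a"} marshals
+			// as {"1":"a"}, since JSON object names must be strings.)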
+ mko := mo + mko.StringifyNumbers = true + + nonDefaultKey := keyFncs.nonDefault + marshalKey := keyFncs.marshal + marshalVal := valFncs.marshal + if mo.Marshalers != nil { + var ok bool + marshalKey, ok = mo.Marshalers.lookup(marshalKey, t.Key()) + marshalVal, _ = mo.Marshalers.lookup(marshalVal, t.Elem()) + nonDefaultKey = nonDefaultKey || ok + } + k := newAddressableValue(t.Key()) + v := newAddressableValue(t.Elem()) + + // A Go map guarantees that each entry has a unique key. + // As such, disable the expensive duplicate name check if we know + // that every Go key will serialize as a unique JSON string. + if !nonDefaultKey && mapKeyWithUniqueRepresentation(k.Kind(), enc.options.AllowInvalidUTF8) { + enc.tokens.last.disableNamespace() + } + + switch { + case !mo.Deterministic || n <= 1: + for iter := va.Value.MapRange(); iter.Next(); { + k.SetIterKey(iter) + if err := marshalKey(mko, enc, k); err != nil { + // TODO: If err is errMissingName, then wrap it as a + // SemanticError since this key type cannot be serialized + // as a JSON string. + return err + } + v.SetIterValue(iter) + if err := marshalVal(mo, enc, v); err != nil { + return err + } + } + case !nonDefaultKey && t.Key().Kind() == reflect.String: + names := getStrings(n) + for i, iter := 0, va.Value.MapRange(); i < n && iter.Next(); i++ { + k.SetIterKey(iter) + (*names)[i] = k.String() + } + names.Sort() + for _, name := range *names { + if err := enc.WriteToken(String(name)); err != nil { + return err + } + // TODO(https://go.dev/issue/57061): Use v.SetMapIndexOf. + k.SetString(name) + v.Set(va.MapIndex(k.Value)) + if err := marshalVal(mo, enc, v); err != nil { + return err + } + } + putStrings(names) + default: + type member struct { + name string // unquoted name + key addressableValue + } + members := make([]member, n) + keys := reflect.MakeSlice(reflect.SliceOf(t.Key()), n, n) + for i, iter := 0, va.Value.MapRange(); i < n && iter.Next(); i++ { + // Marshal the member name. + k := addressableValue{keys.Index(i)} // indexed slice element is always addressable + k.SetIterKey(iter) + if err := marshalKey(mko, enc, k); err != nil { + // TODO: If err is errMissingName, then wrap it as a + // SemanticError since this key type cannot be serialized + // as a JSON string. + return err + } + name := enc.unwriteOnlyObjectMemberName() + members[i] = member{name, k} + } + // TODO: If AllowDuplicateNames is enabled, then sort according + // to reflect.Value as well if the names are equal. + // See internal/fmtsort. + // TODO(https://go.dev/issue/47619): Use slices.SortFunc instead. + sort.Slice(members, func(i, j int) bool { + return lessUTF16(members[i].name, members[j].name) + }) + for _, member := range members { + if err := enc.WriteToken(String(member.name)); err != nil { + return err + } + // TODO(https://go.dev/issue/57061): Use v.SetMapIndexOf. 
+ v.Set(va.MapIndex(member.key.Value)) + if err := marshalVal(mo, enc, v); err != nil { + return err + } + } + } + } + if err := enc.WriteToken(ObjectEnd); err != nil { + return err + } + return nil + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + if uo.format == "emitnull" { + uo.format = "" // only relevant for marshaling + } else { + return newInvalidFormatError("unmarshal", t, uo.format) + } + } + tok, err := dec.ReadToken() + if err != nil { + return err + } + k := tok.Kind() + switch k { + case 'n': + va.Set(reflect.Zero(t)) + return nil + case '{': + once.Do(init) + if va.IsNil() { + va.Set(reflect.MakeMap(t)) + } + + // Handle maps with numeric key types by stringifying them. + uko := uo + uko.StringifyNumbers = true + + nonDefaultKey := keyFncs.nonDefault + unmarshalKey := keyFncs.unmarshal + unmarshalVal := valFncs.unmarshal + if uo.Unmarshalers != nil { + var ok bool + unmarshalKey, ok = uo.Unmarshalers.lookup(unmarshalKey, t.Key()) + unmarshalVal, _ = uo.Unmarshalers.lookup(unmarshalVal, t.Elem()) + nonDefaultKey = nonDefaultKey || ok + } + k := newAddressableValue(t.Key()) + v := newAddressableValue(t.Elem()) + + // Manually check for duplicate entries by virtue of whether the + // unmarshaled key already exists in the destination Go map. + // Consequently, syntactically different names (e.g., "0" and "-0") + // will be rejected as duplicates since they semantically refer + // to the same Go value. This is an unusual interaction + // between syntax and semantics, but is more correct. + if !nonDefaultKey && mapKeyWithUniqueRepresentation(k.Kind(), dec.options.AllowInvalidUTF8) { + dec.tokens.last.disableNamespace() + } + + // In the rare case where the map is not already empty, + // then we need to manually track which keys we already saw + // since existing presence alone is insufficient to indicate + // whether the input had a duplicate name. + var seen reflect.Value + if !dec.options.AllowDuplicateNames && va.Len() > 0 { + seen = reflect.MakeMap(reflect.MapOf(k.Type(), emptyStructType)) + } + + for dec.PeekKind() != '}' { + k.Set(reflect.Zero(t.Key())) + if err := unmarshalKey(uko, dec, k); err != nil { + return err + } + if k.Kind() == reflect.Interface && !k.IsNil() && !k.Elem().Type().Comparable() { + err := fmt.Errorf("invalid incomparable key type %v", k.Elem().Type()) + return &SemanticError{action: "unmarshal", GoType: t, Err: err} + } + + if v2 := va.MapIndex(k.Value); v2.IsValid() { + if !dec.options.AllowDuplicateNames && (!seen.IsValid() || seen.MapIndex(k.Value).IsValid()) { + // TODO: Unread the object name. + name := dec.previousBuffer() + err := &SyntacticError{str: "duplicate name " + string(name) + " in object"} + return err.withOffset(dec.InputOffset() - int64(len(name))) + } + v.Set(v2) + } else { + v.Set(reflect.Zero(v.Type())) + } + err := unmarshalVal(uo, dec, v) + va.SetMapIndex(k.Value, v.Value) + if seen.IsValid() { + seen.SetMapIndex(k.Value, reflect.Zero(emptyStructType)) + } + if err != nil { + return err + } + } + if _, err := dec.ReadToken(); err != nil { + return err + } + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +// mapKeyWithUniqueRepresentation reports whether all possible values of k +// marshal to a different JSON value, and whether all possible JSON values +// that can unmarshal into k unmarshal to different Go values. 
+// In other words, the representation must be bijective.
+func mapKeyWithUniqueRepresentation(k reflect.Kind, allowInvalidUTF8 bool) bool {
+	switch k {
+	case reflect.Bool,
+		reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return true
+	case reflect.String:
+		// For strings, we have to be careful since names with invalid UTF-8
+		// may unescape to the same Go string value.
+		return !allowInvalidUTF8
+	default:
+		// Floating-point kinds are not listed above since NaNs
+		// can appear multiple times and all serialize as "NaN".
+		return false
+	}
+}
+
+func makeStructArshaler(t reflect.Type) *arshaler {
+	// NOTE: The logic below disables namespaces for tracking duplicate names
+	// and does the tracking locally with an efficient bit-set based on which
+	// Go struct fields were seen.
+
+	var fncs arshaler
+	var (
+		once    sync.Once
+		fields  structFields
+		errInit *SemanticError
+	)
+	init := func() {
+		fields, errInit = makeStructFields(t)
+	}
+	fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error {
+		if mo.format != "" && mo.formatDepth == enc.tokens.depth() {
+			return newInvalidFormatError("marshal", t, mo.format)
+		}
+		once.Do(init)
+		if errInit != nil {
+			err := *errInit // shallow copy SemanticError
+			err.action = "marshal"
+			return &err
+		}
+		if err := enc.WriteToken(ObjectStart); err != nil {
+			return err
+		}
+		var seenIdxs uintSet
+		prevIdx := -1
+		enc.tokens.last.disableNamespace() // we manually ensure unique names below
+		for i := range fields.flattened {
+			f := &fields.flattened[i]
+			v := addressableValue{va.Field(f.index[0])} // addressable if struct value is addressable
+			if len(f.index) > 1 {
+				v = v.fieldByIndex(f.index[1:], false)
+				if !v.IsValid() {
+					continue // implies a nil inlined field
+				}
+			}
+
+			// OmitZero skips the field if the Go value is zero,
+			// which we can determine up front without calling the marshaler.
+			if f.omitzero && ((f.isZero == nil && v.IsZero()) || (f.isZero != nil && f.isZero(v))) {
+				continue
+			}
+
+			marshal := f.fncs.marshal
+			nonDefault := f.fncs.nonDefault
+			if mo.Marshalers != nil {
+				var ok bool
+				marshal, ok = mo.Marshalers.lookup(marshal, f.typ)
+				nonDefault = nonDefault || ok
+			}
+
+			// OmitEmpty skips the field if the marshaled JSON value is empty,
+			// which we can know up front if there are no custom marshalers,
+			// otherwise we must marshal the value and unwrite it if empty.
+			if f.omitempty && !nonDefault && f.isEmpty != nil && f.isEmpty(v) {
+				continue // fast path for omitempty
+			}
+
+			// Write the object member name.
+			//
+			// The logic below is semantically equivalent to:
+			//	enc.WriteToken(String(f.name))
+			// but specialized and simplified because:
+			//	1. The Encoder must be expecting an object name.
+			//	2. The object namespace is guaranteed to be disabled.
+			//	3. The object name is guaranteed to be valid and pre-escaped.
+			//	4. There is no need to flush the buffer (for unwrite purposes).
+			//	5. There is no possibility of an error occurring.
+			if optimizeCommon {
+				// Append any delimiters or optional whitespace.
+				if enc.tokens.last.length() > 0 {
+					enc.buf = append(enc.buf, ',')
+				}
+				if enc.options.multiline {
+					enc.buf = enc.appendIndent(enc.buf, enc.tokens.needIndent('"'))
+				}
+
+				// Append the token to the output and to the state machine.
+				n0 := len(enc.buf) // offset before calling appendString
+				if enc.options.EscapeRune == nil {
+					enc.buf = append(enc.buf, f.quotedName...)
+ } else { + enc.buf, _ = appendString(enc.buf, f.name, false, enc.options.EscapeRune) + } + if !enc.options.AllowDuplicateNames { + enc.names.replaceLastQuotedOffset(n0) + } + enc.tokens.last.increment() + } else { + if err := enc.WriteToken(String(f.name)); err != nil { + return err + } + } + + // Write the object member value. + mo2 := mo + if f.string { + mo2.StringifyNumbers = true + } + if f.format != "" { + mo2.formatDepth = enc.tokens.depth() + mo2.format = f.format + } + if err := marshal(mo2, enc, v); err != nil { + return err + } + + // Try unwriting the member if empty (slow path for omitempty). + if f.omitempty { + var prevName *string + if prevIdx >= 0 { + prevName = &fields.flattened[prevIdx].name + } + if enc.unwriteEmptyObjectMember(prevName) { + continue + } + } + + // Remember the previous written object member. + // The set of seen fields only needs to be updated to detect + // duplicate names with those from the inlined fallback. + if !enc.options.AllowDuplicateNames && fields.inlinedFallback != nil { + seenIdxs.insert(uint(f.id)) + } + prevIdx = f.id + } + if fields.inlinedFallback != nil && !(mo.DiscardUnknownMembers && fields.inlinedFallback.unknown) { + var insertUnquotedName func([]byte) bool + if !enc.options.AllowDuplicateNames { + insertUnquotedName = func(name []byte) bool { + // Check that the name from inlined fallback does not match + // one of the previously marshaled names from known fields. + if foldedFields := fields.byFoldedName[string(foldName(name))]; len(foldedFields) > 0 { + if f := fields.byActualName[string(name)]; f != nil { + return seenIdxs.insert(uint(f.id)) + } + for _, f := range foldedFields { + if f.nocase { + return seenIdxs.insert(uint(f.id)) + } + } + } + + // Check that the name does not match any other name + // previously marshaled from the inlined fallback. + return enc.namespaces.last().insertUnquoted(name) + } + } + if err := marshalInlinedFallbackAll(mo, enc, va, fields.inlinedFallback, insertUnquotedName); err != nil { + return err + } + } + if err := enc.WriteToken(ObjectEnd); err != nil { + return err + } + return nil + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + return newInvalidFormatError("unmarshal", t, uo.format) + } + tok, err := dec.ReadToken() + if err != nil { + return err + } + k := tok.Kind() + switch k { + case 'n': + va.Set(reflect.Zero(t)) + return nil + case '{': + once.Do(init) + if errInit != nil { + err := *errInit // shallow copy SemanticError + err.action = "unmarshal" + return &err + } + var seenIdxs uintSet + dec.tokens.last.disableNamespace() + for dec.PeekKind() != '}' { + // Process the object member name. + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return err + } + name := unescapeStringMayCopy(val, flags.isVerbatim()) + f := fields.byActualName[string(name)] + if f == nil { + for _, f2 := range fields.byFoldedName[string(foldName(name))] { + if f2.nocase { + f = f2 + break + } + } + if f == nil { + if uo.RejectUnknownMembers && (fields.inlinedFallback == nil || fields.inlinedFallback.unknown) { + return &SemanticError{action: "unmarshal", GoType: t, Err: fmt.Errorf("unknown name %s", val)} + } + if !dec.options.AllowDuplicateNames && !dec.namespaces.last().insertUnquoted(name) { + // TODO: Unread the object name. 
+ err := &SyntacticError{str: "duplicate name " + string(val) + " in object"} + return err.withOffset(dec.InputOffset() - int64(len(val))) + } + + if fields.inlinedFallback == nil { + // Skip unknown value since we have no place to store it. + if err := dec.SkipValue(); err != nil { + return err + } + } else { + // Marshal into value capable of storing arbitrary object members. + if err := unmarshalInlinedFallbackNext(uo, dec, va, fields.inlinedFallback, val, name); err != nil { + return err + } + } + continue + } + } + if !dec.options.AllowDuplicateNames && !seenIdxs.insert(uint(f.id)) { + // TODO: Unread the object name. + err := &SyntacticError{str: "duplicate name " + string(val) + " in object"} + return err.withOffset(dec.InputOffset() - int64(len(val))) + } + + // Process the object member value. + unmarshal := f.fncs.unmarshal + if uo.Unmarshalers != nil { + unmarshal, _ = uo.Unmarshalers.lookup(unmarshal, f.typ) + } + uo2 := uo + if f.string { + uo2.StringifyNumbers = true + } + if f.format != "" { + uo2.formatDepth = dec.tokens.depth() + uo2.format = f.format + } + v := addressableValue{va.Field(f.index[0])} // addressable if struct value is addressable + if len(f.index) > 1 { + v = v.fieldByIndex(f.index[1:], true) + } + if err := unmarshal(uo2, dec, v); err != nil { + return err + } + } + if _, err := dec.ReadToken(); err != nil { + return err + } + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +func (va addressableValue) fieldByIndex(index []int, mayAlloc bool) addressableValue { + for _, i := range index { + va = va.indirect(mayAlloc) + if !va.IsValid() { + return va + } + va = addressableValue{va.Field(i)} // addressable if struct value is addressable + } + return va +} + +func (va addressableValue) indirect(mayAlloc bool) addressableValue { + if va.Kind() == reflect.Pointer { + if va.IsNil() { + if !mayAlloc { + return addressableValue{} + } + va.Set(reflect.New(va.Type().Elem())) + } + va = addressableValue{va.Elem()} // dereferenced pointer is always addressable + } + return va +} + +func makeSliceArshaler(t reflect.Type) *arshaler { + var fncs arshaler + var ( + once sync.Once + valFncs *arshaler + ) + init := func() { + valFncs = lookupArshaler(t.Elem()) + } + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + // Check for cycles. + if enc.tokens.depth() > startDetectingCyclesAfter { + if err := enc.seenPointers.visit(va.Value); err != nil { + return err + } + defer enc.seenPointers.leave(va.Value) + } + + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + if mo.format == "emitnull" { + if va.IsNil() { + return enc.WriteToken(Null) + } + mo.format = "" + } else { + return newInvalidFormatError("marshal", t, mo.format) + } + } + + // Optimize for marshaling an empty slice without any preceding whitespace. + n := va.Len() + if optimizeCommon && n == 0 && !enc.options.multiline && !enc.tokens.last.needObjectName() { + enc.buf = enc.tokens.mayAppendDelim(enc.buf, '[') + enc.buf = append(enc.buf, "[]"...) 
+ enc.tokens.last.increment() + if enc.needFlush() { + return enc.flush() + } + return nil + } + + once.Do(init) + if err := enc.WriteToken(ArrayStart); err != nil { + return err + } + marshal := valFncs.marshal + if mo.Marshalers != nil { + marshal, _ = mo.Marshalers.lookup(marshal, t.Elem()) + } + for i := 0; i < n; i++ { + v := addressableValue{va.Index(i)} // indexed slice element is always addressable + if err := marshal(mo, enc, v); err != nil { + return err + } + } + if err := enc.WriteToken(ArrayEnd); err != nil { + return err + } + return nil + } + emptySlice := reflect.MakeSlice(t, 0, 0) + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + if uo.format == "emitnull" { + uo.format = "" // only relevant for marshaling + } else { + return newInvalidFormatError("unmarshal", t, uo.format) + } + } + + tok, err := dec.ReadToken() + if err != nil { + return err + } + k := tok.Kind() + switch k { + case 'n': + va.Set(reflect.Zero(t)) + return nil + case '[': + once.Do(init) + unmarshal := valFncs.unmarshal + if uo.Unmarshalers != nil { + unmarshal, _ = uo.Unmarshalers.lookup(unmarshal, t.Elem()) + } + mustZero := true // we do not know the cleanliness of unused capacity + cap := va.Cap() + if cap > 0 { + va.SetLen(cap) + } + var i int + for dec.PeekKind() != ']' { + if i == cap { + // TODO(https://go.dev/issue/48000): Use reflect.Value.Append. + va.Set(reflect.Append(va.Value, reflect.Zero(t.Elem()))) + cap = va.Cap() + va.SetLen(cap) + mustZero = false // append guarantees that unused capacity is zero-initialized + } + v := addressableValue{va.Index(i)} // indexed slice element is always addressable + i++ + if mustZero { + v.Set(reflect.Zero(t.Elem())) + } + if err := unmarshal(uo, dec, v); err != nil { + va.SetLen(i) + return err + } + } + if i == 0 { + va.Set(emptySlice) + } else { + va.SetLen(i) + } + if _, err := dec.ReadToken(); err != nil { + return err + } + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +func makeArrayArshaler(t reflect.Type) *arshaler { + var fncs arshaler + var ( + once sync.Once + valFncs *arshaler + ) + init := func() { + valFncs = lookupArshaler(t.Elem()) + } + n := t.Len() + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + return newInvalidFormatError("marshal", t, mo.format) + } + once.Do(init) + if err := enc.WriteToken(ArrayStart); err != nil { + return err + } + marshal := valFncs.marshal + if mo.Marshalers != nil { + marshal, _ = mo.Marshalers.lookup(marshal, t.Elem()) + } + for i := 0; i < n; i++ { + v := addressableValue{va.Index(i)} // indexed array element is addressable if array is addressable + if err := marshal(mo, enc, v); err != nil { + return err + } + } + if err := enc.WriteToken(ArrayEnd); err != nil { + return err + } + return nil + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + return newInvalidFormatError("unmarshal", t, uo.format) + } + tok, err := dec.ReadToken() + if err != nil { + return err + } + k := tok.Kind() + switch k { + case 'n': + va.Set(reflect.Zero(t)) + return nil + case '[': + once.Do(init) + unmarshal := valFncs.unmarshal + if uo.Unmarshalers != nil { + unmarshal, _ = uo.Unmarshalers.lookup(unmarshal, t.Elem()) + } + var i int + for dec.PeekKind() != ']' { + 
if i >= n { + err := errors.New("too many array elements") + return &SemanticError{action: "unmarshal", GoType: t, Err: err} + } + v := addressableValue{va.Index(i)} // indexed array element is addressable if array is addressable + v.Set(reflect.Zero(v.Type())) + if err := unmarshal(uo, dec, v); err != nil { + return err + } + i++ + } + if _, err := dec.ReadToken(); err != nil { + return err + } + if i < n { + err := errors.New("too few array elements") + return &SemanticError{action: "unmarshal", GoType: t, Err: err} + } + return nil + } + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + return &fncs +} + +func makePointerArshaler(t reflect.Type) *arshaler { + var fncs arshaler + var ( + once sync.Once + valFncs *arshaler + ) + init := func() { + valFncs = lookupArshaler(t.Elem()) + } + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + // Check for cycles. + if enc.tokens.depth() > startDetectingCyclesAfter { + if err := enc.seenPointers.visit(va.Value); err != nil { + return err + } + defer enc.seenPointers.leave(va.Value) + } + + // NOTE: MarshalOptions.format is forwarded to underlying marshal. + if va.IsNil() { + return enc.WriteToken(Null) + } + once.Do(init) + marshal := valFncs.marshal + if mo.Marshalers != nil { + marshal, _ = mo.Marshalers.lookup(marshal, t.Elem()) + } + v := addressableValue{va.Elem()} // dereferenced pointer is always addressable + return marshal(mo, enc, v) + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + // NOTE: UnmarshalOptions.format is forwarded to underlying unmarshal. + if dec.PeekKind() == 'n' { + if _, err := dec.ReadToken(); err != nil { + return err + } + va.Set(reflect.Zero(t)) + return nil + } + once.Do(init) + unmarshal := valFncs.unmarshal + if uo.Unmarshalers != nil { + unmarshal, _ = uo.Unmarshalers.lookup(unmarshal, t.Elem()) + } + if va.IsNil() { + va.Set(reflect.New(t.Elem())) + } + v := addressableValue{va.Elem()} // dereferenced pointer is always addressable + return unmarshal(uo, dec, v) + } + return &fncs +} + +func makeInterfaceArshaler(t reflect.Type) *arshaler { + // NOTE: Values retrieved from an interface are not addressable, + // so we shallow copy the values to make them addressable and + // store them back into the interface afterwards. + + var fncs arshaler + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + return newInvalidFormatError("marshal", t, mo.format) + } + if va.IsNil() { + return enc.WriteToken(Null) + } + v := newAddressableValue(va.Elem().Type()) + v.Set(va.Elem()) + marshal := lookupArshaler(v.Type()).marshal + if mo.Marshalers != nil { + marshal, _ = mo.Marshalers.lookup(marshal, v.Type()) + } + // Optimize for the any type if there are no special options. 
+ if optimizeCommon && t == anyType && !mo.StringifyNumbers && mo.format == "" && (mo.Marshalers == nil || !mo.Marshalers.fromAny) { + return marshalValueAny(mo, enc, va.Elem().Interface()) + } + return marshal(mo, enc, v) + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + return newInvalidFormatError("unmarshal", t, uo.format) + } + if dec.PeekKind() == 'n' { + if _, err := dec.ReadToken(); err != nil { + return err + } + va.Set(reflect.Zero(t)) + return nil + } + var v addressableValue + if va.IsNil() { + // Optimize for the any type if there are no special options. + // We do not care about stringified numbers since JSON strings + // are always unmarshaled into an any value as Go strings. + // Duplicate name check must be enforced since unmarshalValueAny + // does not implement merge semantics. + if optimizeCommon && t == anyType && uo.format == "" && (uo.Unmarshalers == nil || !uo.Unmarshalers.fromAny) && !dec.options.AllowDuplicateNames { + v, err := unmarshalValueAny(uo, dec) + // We must check for nil interface values up front. + // See https://go.dev/issue/52310. + if v != nil { + va.Set(reflect.ValueOf(v)) + } + return err + } + + k := dec.PeekKind() + if !isAnyType(t) { + err := errors.New("cannot derive concrete type for non-empty interface") + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + switch k { + case 'f', 't': + v = newAddressableValue(boolType) + case '"': + v = newAddressableValue(stringType) + case '0': + v = newAddressableValue(float64Type) + case '{': + v = newAddressableValue(mapStringAnyType) + case '[': + v = newAddressableValue(sliceAnyType) + default: + // If k is invalid (e.g., due to an I/O or syntax error), then + // that will be cached by PeekKind and returned by ReadValue. + // If k is '}' or ']', then ReadValue must error since + // those are invalid kinds at the start of a JSON value. + _, err := dec.ReadValue() + return err + } + } else { + // Shallow copy the existing value to keep it addressable. + // Any mutations at the top-level of the value will be observable + // since we always store this value back into the interface value. + v = newAddressableValue(va.Elem().Type()) + v.Set(va.Elem()) + } + unmarshal := lookupArshaler(v.Type()).unmarshal + if uo.Unmarshalers != nil { + unmarshal, _ = uo.Unmarshalers.lookup(unmarshal, v.Type()) + } + err := unmarshal(uo, dec, v) + va.Set(v.Value) + return err + } + return &fncs +} + +// isAnyType reports wether t is equivalent to the any interface type. +func isAnyType(t reflect.Type) bool { + // This is forward compatible if the Go language permits type sets within + // ordinary interfaces where an interface with zero methods does not + // necessarily mean it can hold every possible Go type. + // See https://go.dev/issue/45346. 
+ return t == anyType || anyType.Implements(t) +} + +func makeInvalidArshaler(t reflect.Type) *arshaler { + var fncs arshaler + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + return &SemanticError{action: "marshal", GoType: t} + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + return &SemanticError{action: "unmarshal", GoType: t} + } + return &fncs +} + +func newInvalidFormatError(action string, t reflect.Type, format string) error { + err := fmt.Errorf("invalid format flag: %q", format) + return &SemanticError{action: action, GoType: t, Err: err} +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_funcs.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_funcs.go new file mode 100644 index 000000000..8a4e70083 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_funcs.go @@ -0,0 +1,387 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "errors" + "fmt" + "reflect" + "sync" +) + +// SkipFunc may be returned by MarshalFuncV2 and UnmarshalFuncV2 functions. +// +// Any function that returns SkipFunc must not cause observable side effects +// on the provided Encoder or Decoder. For example, it is permissible to call +// Decoder.PeekKind, but not permissible to call Decoder.ReadToken or +// Encoder.WriteToken since such methods mutate the state. +const SkipFunc = jsonError("skip function") + +// Marshalers is a list of functions that may override the marshal behavior +// of specific types. Populate MarshalOptions.Marshalers to use it. +// A nil *Marshalers is equivalent to an empty list. +type Marshalers = typedMarshalers + +// NewMarshalers constructs a flattened list of marshal functions. +// If multiple functions in the list are applicable for a value of a given type, +// then those earlier in the list take precedence over those that come later. +// If a function returns SkipFunc, then the next applicable function is called, +// otherwise the default marshaling behavior is used. +// +// For example: +// +// m1 := NewMarshalers(f1, f2) +// m2 := NewMarshalers(f0, m1, f3) // equivalent to m3 +// m3 := NewMarshalers(f0, f1, f2, f3) // equivalent to m2 +func NewMarshalers(ms ...*Marshalers) *Marshalers { + return newMarshalers(ms...) +} + +// Unmarshalers is a list of functions that may override the unmarshal behavior +// of specific types. Populate UnmarshalOptions.Unmarshalers to use it. +// A nil *Unmarshalers is equivalent to an empty list. +type Unmarshalers = typedUnmarshalers + +// NewUnmarshalers constructs a flattened list of unmarshal functions. +// If multiple functions in the list are applicable for a value of a given type, +// then those earlier in the list take precedence over those that come later. +// If a function returns SkipFunc, then the next applicable function is called, +// otherwise the default unmarshaling behavior is used. +// +// For example: +// +// u1 := NewUnmarshalers(f1, f2) +// u2 := NewUnmarshalers(f0, u1, f3) // equivalent to u3 +// u3 := NewUnmarshalers(f0, f1, f2, f3) // equivalent to u2 +func NewUnmarshalers(us ...*Unmarshalers) *Unmarshalers { + return newUnmarshalers(us...) 
+} + +type typedMarshalers = typedArshalers[MarshalOptions, Encoder] +type typedUnmarshalers = typedArshalers[UnmarshalOptions, Decoder] +type typedArshalers[Options, Coder any] struct { + nonComparable + + fncVals []typedArshaler[Options, Coder] + fncCache sync.Map // map[reflect.Type]arshaler + + // fromAny reports whether any of Go types used to represent arbitrary JSON + // (i.e., any, bool, string, float64, map[string]any, or []any) matches + // any of the provided type-specific arshalers. + // + // This bit of information is needed in arshal_default.go to determine + // whether to use the specialized logic in arshal_any.go to handle + // the any interface type. The logic in arshal_any.go does not support + // type-specific arshal functions, so we must avoid using that logic + // if this is true. + fromAny bool +} +type typedMarshaler = typedArshaler[MarshalOptions, Encoder] +type typedUnmarshaler = typedArshaler[UnmarshalOptions, Decoder] +type typedArshaler[Options, Coder any] struct { + typ reflect.Type + fnc func(Options, *Coder, addressableValue) error + maySkip bool +} + +func newMarshalers(ms ...*Marshalers) *Marshalers { return newTypedArshalers(ms...) } +func newUnmarshalers(us ...*Unmarshalers) *Unmarshalers { return newTypedArshalers(us...) } +func newTypedArshalers[Options, Coder any](as ...*typedArshalers[Options, Coder]) *typedArshalers[Options, Coder] { + var a typedArshalers[Options, Coder] + for _, a2 := range as { + if a2 != nil { + a.fncVals = append(a.fncVals, a2.fncVals...) + a.fromAny = a.fromAny || a2.fromAny + } + } + if len(a.fncVals) == 0 { + return nil + } + return &a +} + +func (a *typedArshalers[Options, Coder]) lookup(fnc func(Options, *Coder, addressableValue) error, t reflect.Type) (func(Options, *Coder, addressableValue) error, bool) { + if a == nil { + return fnc, false + } + if v, ok := a.fncCache.Load(t); ok { + if v == nil { + return fnc, false + } + return v.(func(Options, *Coder, addressableValue) error), true + } + + // Collect a list of arshalers that can be called for this type. + // This list may be longer than 1 since some arshalers can be skipped. + var fncs []func(Options, *Coder, addressableValue) error + for _, fncVal := range a.fncVals { + if !castableTo(t, fncVal.typ) { + continue + } + fncs = append(fncs, fncVal.fnc) + if !fncVal.maySkip { + break // subsequent arshalers will never be called + } + } + + if len(fncs) == 0 { + a.fncCache.Store(t, nil) // nil to indicate that no funcs found + return fnc, false + } + + // Construct an arshaler that may call every applicable arshaler. + fncDefault := fnc + fnc = func(o Options, c *Coder, v addressableValue) error { + for _, fnc := range fncs { + if err := fnc(o, c, v); err != SkipFunc { + return err // may be nil or non-nil + } + } + return fncDefault(o, c, v) + } + + // Use the first stored so duplicate work can be garbage collected. + v, _ := a.fncCache.LoadOrStore(t, fnc) + return v.(func(Options, *Coder, addressableValue) error), true +} + +// MarshalFuncV1 constructs a type-specific marshaler that +// specifies how to marshal values of type T. +// T can be any type except a named pointer. +// The function is always provided with a non-nil pointer value +// if T is an interface or pointer type. +// +// The function must marshal exactly one JSON value. +// The value of T must not be retained outside the function call. +// It may not return SkipFunc. 
+func MarshalFuncV1[T any](fn func(T) ([]byte, error)) *Marshalers { + t := reflect.TypeOf((*T)(nil)).Elem() + assertCastableTo(t, true) + typFnc := typedMarshaler{ + typ: t, + fnc: func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + val, err := fn(va.castTo(t).Interface().(T)) + if err != nil { + err = wrapSkipFunc(err, "marshal function of type func(T) ([]byte, error)") + // TODO: Avoid wrapping semantic errors. + return &SemanticError{action: "marshal", GoType: t, Err: err} + } + if err := enc.WriteValue(val); err != nil { + // TODO: Avoid wrapping semantic or I/O errors. + return &SemanticError{action: "marshal", JSONKind: RawValue(val).Kind(), GoType: t, Err: err} + } + return nil + }, + } + return &Marshalers{fncVals: []typedMarshaler{typFnc}, fromAny: castableToFromAny(t)} +} + +// MarshalFuncV2 constructs a type-specific marshaler that +// specifies how to marshal values of type T. +// T can be any type except a named pointer. +// The function is always provided with a non-nil pointer value +// if T is an interface or pointer type. +// +// The function must marshal exactly one JSON value by calling write methods +// on the provided encoder. It may return SkipFunc such that marshaling can +// move on to the next marshal function. However, no mutable method calls may +// be called on the encoder if SkipFunc is returned. +// The pointer to Encoder and the value of T must not be retained +// outside the function call. +func MarshalFuncV2[T any](fn func(MarshalOptions, *Encoder, T) error) *Marshalers { + t := reflect.TypeOf((*T)(nil)).Elem() + assertCastableTo(t, true) + typFnc := typedMarshaler{ + typ: t, + fnc: func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + prevDepth, prevLength := enc.tokens.depthLength() + err := fn(mo, enc, va.castTo(t).Interface().(T)) + currDepth, currLength := enc.tokens.depthLength() + if err == nil && (prevDepth != currDepth || prevLength+1 != currLength) { + err = errors.New("must write exactly one JSON value") + } + if err != nil { + if err == SkipFunc { + if prevDepth == currDepth && prevLength == currLength { + return SkipFunc + } + err = errors.New("must not write any JSON tokens when skipping") + } + // TODO: Avoid wrapping semantic or I/O errors. + return &SemanticError{action: "marshal", GoType: t, Err: err} + } + return nil + }, + maySkip: true, + } + return &Marshalers{fncVals: []typedMarshaler{typFnc}, fromAny: castableToFromAny(t)} +} + +// UnmarshalFuncV1 constructs a type-specific unmarshaler that +// specifies how to unmarshal values of type T. +// T must be an unnamed pointer or an interface type. +// The function is always provided with a non-nil pointer value. +// +// The function must unmarshal exactly one JSON value. +// The input []byte must not be mutated. +// The input []byte and value T must not be retained outside the function call. +// It may not return SkipFunc. +func UnmarshalFuncV1[T any](fn func([]byte, T) error) *Unmarshalers { + t := reflect.TypeOf((*T)(nil)).Elem() + assertCastableTo(t, false) + typFnc := typedUnmarshaler{ + typ: t, + fnc: func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + val, err := dec.ReadValue() + if err != nil { + return err // must be a syntactic or I/O error + } + err = fn(val, va.castTo(t).Interface().(T)) + if err != nil { + err = wrapSkipFunc(err, "unmarshal function of type func([]byte, T) error") + // TODO: Avoid wrapping semantic, syntactic, or I/O errors. 
+ return &SemanticError{action: "unmarshal", JSONKind: val.Kind(), GoType: t, Err: err} + } + return nil + }, + } + return &Unmarshalers{fncVals: []typedUnmarshaler{typFnc}, fromAny: castableToFromAny(t)} +} + +// UnmarshalFuncV2 constructs a type-specific unmarshaler that +// specifies how to unmarshal values of type T. +// T must be an unnamed pointer or an interface type. +// The function is always provided with a non-nil pointer value. +// +// The function must unmarshal exactly one JSON value by calling read methods +// on the provided decoder. It may return SkipFunc such that unmarshaling can +// move on to the next unmarshal function. However, no mutable method calls may +// be called on the decoder if SkipFunc is returned. +// The pointer to Decoder and the value of T must not be retained +// outside the function call. +func UnmarshalFuncV2[T any](fn func(UnmarshalOptions, *Decoder, T) error) *Unmarshalers { + t := reflect.TypeOf((*T)(nil)).Elem() + assertCastableTo(t, false) + typFnc := typedUnmarshaler{ + typ: t, + fnc: func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + prevDepth, prevLength := dec.tokens.depthLength() + err := fn(uo, dec, va.castTo(t).Interface().(T)) + currDepth, currLength := dec.tokens.depthLength() + if err == nil && (prevDepth != currDepth || prevLength+1 != currLength) { + err = errors.New("must read exactly one JSON value") + } + if err != nil { + if err == SkipFunc { + if prevDepth == currDepth && prevLength == currLength { + return SkipFunc + } + err = errors.New("must not read any JSON tokens when skipping") + } + // TODO: Avoid wrapping semantic, syntactic, or I/O errors. + return &SemanticError{action: "unmarshal", GoType: t, Err: err} + } + return nil + }, + maySkip: true, + } + return &Unmarshalers{fncVals: []typedUnmarshaler{typFnc}, fromAny: castableToFromAny(t)} +} + +// assertCastableTo asserts that "to" is a valid type to be casted to. +// These are the Go types that type-specific arshalers may operate upon. +// +// Let AllTypes be the universal set of all possible Go types. +// This function generally asserts that: +// +// len([from for from in AllTypes if castableTo(from, to)]) > 0 +// +// otherwise it panics. +// +// As a special-case if marshal is false, then we forbid any non-pointer or +// non-interface type since it is almost always a bug trying to unmarshal +// into something where the end-user caller did not pass in an addressable value +// since they will not observe the mutations. +func assertCastableTo(to reflect.Type, marshal bool) { + switch to.Kind() { + case reflect.Interface: + return + case reflect.Pointer: + // Only allow unnamed pointers to be consistent with the fact that + // taking the address of a value produces an unnamed pointer type. + if to.Name() == "" { + return + } + default: + // Technically, non-pointer types are permissible for unmarshal. + // However, they are often a bug since the receiver would be immutable. + // Thus, only allow them for marshaling. + if marshal { + return + } + } + if marshal { + panic(fmt.Sprintf("input type %v must be an interface type, an unnamed pointer type, or a non-pointer type", to)) + } else { + panic(fmt.Sprintf("input type %v must be an interface type or an unnamed pointer type", to)) + } +} + +// castableTo checks whether values of type "from" can be casted to type "to". +// Nil pointer or interface "from" values are never considered castable. +// +// This function must be kept in sync with addressableValue.castTo. 
+func castableTo(from, to reflect.Type) bool { + switch to.Kind() { + case reflect.Interface: + // TODO: This breaks when ordinary interfaces can have type sets + // since interfaces now exist where only the value form of a type (T) + // implements the interface, but not the pointer variant (*T). + // See https://go.dev/issue/45346. + return reflect.PointerTo(from).Implements(to) + case reflect.Pointer: + // Common case for unmarshaling. + // From must be a concrete or interface type. + return reflect.PointerTo(from) == to + default: + // Common case for marshaling. + // From must be a concrete type. + return from == to + } +} + +// castTo casts va to the specified type. +// If the type is an interface, then the underlying type will always +// be a non-nil pointer to a concrete type. +// +// Requirement: castableTo(va.Type(), to) must hold. +func (va addressableValue) castTo(to reflect.Type) reflect.Value { + switch to.Kind() { + case reflect.Interface: + return va.Addr().Convert(to) + case reflect.Pointer: + return va.Addr() + default: + return va.Value + } +} + +// castableToFromAny reports whether "to" can be casted to from any +// of the dynamic types used to represent arbitrary JSON. +func castableToFromAny(to reflect.Type) bool { + for _, from := range []reflect.Type{anyType, boolType, stringType, float64Type, mapStringAnyType, sliceAnyType} { + if castableTo(from, to) { + return true + } + } + return false +} + +func wrapSkipFunc(err error, what string) error { + if err == SkipFunc { + return errors.New(what + " cannot be skipped") + } + return err +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go new file mode 100644 index 000000000..258a98247 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go @@ -0,0 +1,213 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "errors" + "reflect" +) + +// This package supports "inlining" a Go struct field, where the contents +// of the serialized field (which must be a JSON object) are treated as if +// they are part of the parent Go struct (which represents a JSON object). +// +// Generally, inlined fields are of a Go struct type, where the fields of the +// nested struct are virtually hoisted up to the parent struct using rules +// similar to how Go embedding works (but operating within the JSON namespace). +// +// However, inlined fields may also be of a Go map type with a string key +// or a RawValue. Such inlined fields are called "fallback" fields since they +// represent any arbitrary JSON object member. Explicitly named fields take +// precedence over the inlined fallback. Only one inlined fallback is allowed. + +var rawValueType = reflect.TypeOf((*RawValue)(nil)).Elem() + +// marshalInlinedFallbackAll marshals all the members in an inlined fallback. 
+func marshalInlinedFallbackAll(mo MarshalOptions, enc *Encoder, va addressableValue, f *structField, insertUnquotedName func([]byte) bool) error { + v := addressableValue{va.Field(f.index[0])} // addressable if struct value is addressable + if len(f.index) > 1 { + v = v.fieldByIndex(f.index[1:], false) + if !v.IsValid() { + return nil // implies a nil inlined field + } + } + v = v.indirect(false) + if !v.IsValid() { + return nil + } + + if v.Type() == rawValueType { + b := v.Interface().(RawValue) + if len(b) == 0 { // TODO: Should this be nil? What if it were all whitespace? + return nil + } + + dec := getBufferedDecoder(b, DecodeOptions{AllowDuplicateNames: true, AllowInvalidUTF8: true}) + defer putBufferedDecoder(dec) + + tok, err := dec.ReadToken() + if err != nil { + return &SemanticError{action: "marshal", GoType: rawValueType, Err: err} + } + if tok.Kind() != '{' { + err := errors.New("inlined raw value must be a JSON object") + return &SemanticError{action: "marshal", JSONKind: tok.Kind(), GoType: rawValueType, Err: err} + } + for dec.PeekKind() != '}' { + // Parse the JSON object name. + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return &SemanticError{action: "marshal", GoType: rawValueType, Err: err} + } + if insertUnquotedName != nil { + name := unescapeStringMayCopy(val, flags.isVerbatim()) + if !insertUnquotedName(name) { + return &SyntacticError{str: "duplicate name " + string(val) + " in object"} + } + } + if err := enc.WriteValue(val); err != nil { + return err + } + + // Parse the JSON object value. + val, err = dec.readValue(&flags) + if err != nil { + return &SemanticError{action: "marshal", GoType: rawValueType, Err: err} + } + if err := enc.WriteValue(val); err != nil { + return err + } + } + if _, err := dec.ReadToken(); err != nil { + return &SemanticError{action: "marshal", GoType: rawValueType, Err: err} + } + if err := dec.checkEOF(); err != nil { + return &SemanticError{action: "marshal", GoType: rawValueType, Err: err} + } + return nil + } else { + m := v // must be a map[string]V + n := m.Len() + if n == 0 { + return nil + } + mk := newAddressableValue(stringType) + mv := newAddressableValue(m.Type().Elem()) + marshalKey := func(mk addressableValue) error { + b, err := appendString(enc.UnusedBuffer(), mk.String(), !enc.options.AllowInvalidUTF8, nil) + if err != nil { + return err + } + if insertUnquotedName != nil { + isVerbatim := bytes.IndexByte(b, '\\') < 0 + name := unescapeStringMayCopy(b, isVerbatim) + if !insertUnquotedName(name) { + return &SyntacticError{str: "duplicate name " + string(b) + " in object"} + } + } + return enc.WriteValue(b) + } + marshalVal := f.fncs.marshal + if mo.Marshalers != nil { + marshalVal, _ = mo.Marshalers.lookup(marshalVal, mv.Type()) + } + if !mo.Deterministic || n <= 1 { + for iter := m.MapRange(); iter.Next(); { + mk.SetIterKey(iter) + if err := marshalKey(mk); err != nil { + return err + } + mv.Set(iter.Value()) + if err := marshalVal(mo, enc, mv); err != nil { + return err + } + } + } else { + names := getStrings(n) + for i, iter := 0, m.Value.MapRange(); i < n && iter.Next(); i++ { + mk.SetIterKey(iter) + (*names)[i] = mk.String() + } + names.Sort() + for _, name := range *names { + mk.SetString(name) + if err := marshalKey(mk); err != nil { + return err + } + // TODO(https://go.dev/issue/57061): Use mv.SetMapIndexOf. 
+ mv.Set(m.MapIndex(mk.Value)) + if err := marshalVal(mo, enc, mv); err != nil { + return err + } + } + putStrings(names) + } + return nil + } +} + +// unmarshalInlinedFallbackNext unmarshals only the next member in an inlined fallback. +func unmarshalInlinedFallbackNext(uo UnmarshalOptions, dec *Decoder, va addressableValue, f *structField, quotedName, unquotedName []byte) error { + v := addressableValue{va.Field(f.index[0])} // addressable if struct value is addressable + if len(f.index) > 1 { + v = v.fieldByIndex(f.index[1:], true) + } + v = v.indirect(true) + + if v.Type() == rawValueType { + b := v.Addr().Interface().(*RawValue) + if len(*b) == 0 { // TODO: Should this be nil? What if it were all whitespace? + *b = append(*b, '{') + } else { + *b = trimSuffixWhitespace(*b) + if hasSuffixByte(*b, '}') { + // TODO: When merging into an object for the first time, + // should we verify that it is valid? + *b = trimSuffixByte(*b, '}') + *b = trimSuffixWhitespace(*b) + if !hasSuffixByte(*b, ',') && !hasSuffixByte(*b, '{') { + *b = append(*b, ',') + } + } else { + err := errors.New("inlined raw value must be a JSON object") + return &SemanticError{action: "unmarshal", GoType: rawValueType, Err: err} + } + } + *b = append(*b, quotedName...) + *b = append(*b, ':') + rawValue, err := dec.ReadValue() + if err != nil { + return err + } + *b = append(*b, rawValue...) + *b = append(*b, '}') + return nil + } else { + name := string(unquotedName) // TODO: Intern this? + + m := v // must be a map[string]V + if m.IsNil() { + m.Set(reflect.MakeMap(m.Type())) + } + mk := reflect.ValueOf(name) + mv := newAddressableValue(v.Type().Elem()) // TODO: Cache across calls? + if v2 := m.MapIndex(mk); v2.IsValid() { + mv.Set(v2) + } + + unmarshal := f.fncs.unmarshal + if uo.Unmarshalers != nil { + unmarshal, _ = uo.Unmarshalers.lookup(unmarshal, mv.Type()) + } + err := unmarshal(uo, dec, mv) + m.SetMapIndex(mk, mv.Value) + if err != nil { + return err + } + return nil + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go new file mode 100644 index 000000000..20899c868 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go @@ -0,0 +1,229 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "encoding" + "errors" + "reflect" +) + +// Interfaces for custom serialization. +var ( + jsonMarshalerV1Type = reflect.TypeOf((*MarshalerV1)(nil)).Elem() + jsonMarshalerV2Type = reflect.TypeOf((*MarshalerV2)(nil)).Elem() + jsonUnmarshalerV1Type = reflect.TypeOf((*UnmarshalerV1)(nil)).Elem() + jsonUnmarshalerV2Type = reflect.TypeOf((*UnmarshalerV2)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +// MarshalerV1 is implemented by types that can marshal themselves. +// It is recommended that types implement MarshalerV2 unless the implementation +// is trying to avoid a hard dependency on the "jsontext" package. +// +// It is recommended that implementations return a buffer that is safe +// for the caller to retain and potentially mutate. 
+type MarshalerV1 interface { + MarshalJSON() ([]byte, error) +} + +// MarshalerV2 is implemented by types that can marshal themselves. +// It is recommended that types implement MarshalerV2 instead of MarshalerV1 +// since this is both more performant and flexible. +// If a type implements both MarshalerV1 and MarshalerV2, +// then MarshalerV2 takes precedence. In such a case, both implementations +// should aim to have equivalent behavior for the default marshal options. +// +// The implementation must write only one JSON value to the Encoder and +// must not retain the pointer to Encoder. +type MarshalerV2 interface { + MarshalNextJSON(MarshalOptions, *Encoder) error + + // TODO: Should users call the MarshalOptions.MarshalNext method or + // should/can they call this method directly? Does it matter? +} + +// UnmarshalerV1 is implemented by types that can unmarshal themselves. +// It is recommended that types implement UnmarshalerV2 unless +// the implementation is trying to avoid a hard dependency on this package. +// +// The input can be assumed to be a valid encoding of a JSON value +// if called from unmarshal functionality in this package. +// UnmarshalJSON must copy the JSON data if it is retained after returning. +// It is recommended that UnmarshalJSON implement merge semantics when +// unmarshaling into a pre-populated value. +// +// Implementations must not retain or mutate the input []byte. +type UnmarshalerV1 interface { + UnmarshalJSON([]byte) error +} + +// UnmarshalerV2 is implemented by types that can unmarshal themselves. +// It is recommended that types implement UnmarshalerV2 instead of UnmarshalerV1 +// since this is both more performant and flexible. +// If a type implements both UnmarshalerV1 and UnmarshalerV2, +// then UnmarshalerV2 takes precedence. In such a case, both implementations +// should aim to have equivalent behavior for the default unmarshal options. +// +// The implementation must read only one JSON value from the Decoder. +// It is recommended that UnmarshalNextJSON implement merge semantics when +// unmarshaling into a pre-populated value. +// +// Implementations must not retain the pointer to Decoder. +type UnmarshalerV2 interface { + UnmarshalNextJSON(UnmarshalOptions, *Decoder) error + + // TODO: Should users call the UnmarshalOptions.UnmarshalNext method or + // should/can they call this method directly? Does it matter? +} + +func makeMethodArshaler(fncs *arshaler, t reflect.Type) *arshaler { + // Avoid injecting method arshaler on the pointer or interface version + // to avoid ever calling the method on a nil pointer or interface receiver. + // Let it be injected on the value receiver (which is always addressable). + if t.Kind() == reflect.Pointer || t.Kind() == reflect.Interface { + return fncs + } + + // Handle custom marshaler. + switch which, needAddr := implementsWhich(t, jsonMarshalerV2Type, jsonMarshalerV1Type, textMarshalerType); which { + case jsonMarshalerV2Type: + fncs.nonDefault = true + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + prevDepth, prevLength := enc.tokens.depthLength() + err := va.addrWhen(needAddr).Interface().(MarshalerV2).MarshalNextJSON(mo, enc) + currDepth, currLength := enc.tokens.depthLength() + if (prevDepth != currDepth || prevLength+1 != currLength) && err == nil { + err = errors.New("must write exactly one JSON value") + } + if err != nil { + err = wrapSkipFunc(err, "marshal method") + // TODO: Avoid wrapping semantic or I/O errors. 
+ return &SemanticError{action: "marshal", GoType: t, Err: err} + } + return nil + } + case jsonMarshalerV1Type: + fncs.nonDefault = true + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + marshaler := va.addrWhen(needAddr).Interface().(MarshalerV1) + val, err := marshaler.MarshalJSON() + if err != nil { + err = wrapSkipFunc(err, "marshal method") + // TODO: Avoid wrapping semantic errors. + return &SemanticError{action: "marshal", GoType: t, Err: err} + } + if err := enc.WriteValue(val); err != nil { + // TODO: Avoid wrapping semantic or I/O errors. + return &SemanticError{action: "marshal", JSONKind: RawValue(val).Kind(), GoType: t, Err: err} + } + return nil + } + case textMarshalerType: + fncs.nonDefault = true + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + marshaler := va.addrWhen(needAddr).Interface().(encoding.TextMarshaler) + s, err := marshaler.MarshalText() + if err != nil { + err = wrapSkipFunc(err, "marshal method") + // TODO: Avoid wrapping semantic errors. + return &SemanticError{action: "marshal", JSONKind: '"', GoType: t, Err: err} + } + val := enc.UnusedBuffer() + val, err = appendString(val, string(s), true, nil) + if err != nil { + return &SemanticError{action: "marshal", JSONKind: '"', GoType: t, Err: err} + } + if err := enc.WriteValue(val); err != nil { + // TODO: Avoid wrapping syntactic or I/O errors. + return &SemanticError{action: "marshal", JSONKind: '"', GoType: t, Err: err} + } + return nil + } + } + + // Handle custom unmarshaler. + switch which, needAddr := implementsWhich(t, jsonUnmarshalerV2Type, jsonUnmarshalerV1Type, textUnmarshalerType); which { + case jsonUnmarshalerV2Type: + fncs.nonDefault = true + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + prevDepth, prevLength := dec.tokens.depthLength() + err := va.addrWhen(needAddr).Interface().(UnmarshalerV2).UnmarshalNextJSON(uo, dec) + currDepth, currLength := dec.tokens.depthLength() + if (prevDepth != currDepth || prevLength+1 != currLength) && err == nil { + err = errors.New("must read exactly one JSON value") + } + if err != nil { + err = wrapSkipFunc(err, "unmarshal method") + // TODO: Avoid wrapping semantic, syntactic, or I/O errors. + return &SemanticError{action: "unmarshal", GoType: t, Err: err} + } + return nil + } + case jsonUnmarshalerV1Type: + fncs.nonDefault = true + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + val, err := dec.ReadValue() + if err != nil { + return err // must be a syntactic or I/O error + } + unmarshaler := va.addrWhen(needAddr).Interface().(UnmarshalerV1) + if err := unmarshaler.UnmarshalJSON(val); err != nil { + err = wrapSkipFunc(err, "unmarshal method") + // TODO: Avoid wrapping semantic, syntactic, or I/O errors. 
+ return &SemanticError{action: "unmarshal", JSONKind: val.Kind(), GoType: t, Err: err} + } + return nil + } + case textUnmarshalerType: + fncs.nonDefault = true + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + var flags valueFlags + val, err := dec.readValue(&flags) + if err != nil { + return err // must be a syntactic or I/O error + } + if val.Kind() != '"' { + err = errors.New("JSON value must be string type") + return &SemanticError{action: "unmarshal", JSONKind: val.Kind(), GoType: t, Err: err} + } + s := unescapeStringMayCopy(val, flags.isVerbatim()) + unmarshaler := va.addrWhen(needAddr).Interface().(encoding.TextUnmarshaler) + if err := unmarshaler.UnmarshalText(s); err != nil { + err = wrapSkipFunc(err, "unmarshal method") + // TODO: Avoid wrapping semantic, syntactic, or I/O errors. + return &SemanticError{action: "unmarshal", JSONKind: val.Kind(), GoType: t, Err: err} + } + return nil + } + } + + return fncs +} + +// implementsWhich is like t.Implements(ifaceType) for a list of interfaces, +// but checks whether either t or reflect.PointerTo(t) implements the interface. +// It returns the first interface type that matches and whether a value of t +// needs to be addressed first before it implements the interface. +func implementsWhich(t reflect.Type, ifaceTypes ...reflect.Type) (which reflect.Type, needAddr bool) { + for _, ifaceType := range ifaceTypes { + switch { + case t.Implements(ifaceType): + return ifaceType, false + case reflect.PointerTo(t).Implements(ifaceType): + return ifaceType, true + } + } + return nil, false +} + +// addrWhen returns va.Addr if addr is specified, otherwise it returns itself. +func (va addressableValue) addrWhen(addr bool) reflect.Value { + if addr { + return va.Addr() + } + return va.Value +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go new file mode 100644 index 000000000..fc8d5b007 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go @@ -0,0 +1,241 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "errors" + "fmt" + "reflect" + "strings" + "time" +) + +var ( + timeDurationType = reflect.TypeOf((*time.Duration)(nil)).Elem() + timeTimeType = reflect.TypeOf((*time.Time)(nil)).Elem() +) + +func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler { + // Ideally, time types would implement MarshalerV2 and UnmarshalerV2, + // but that would incur a dependency on package json from package time. + // Given how widely used time is, it is more acceptable that we incur a + // dependency on time from json. + // + // Injecting the arshaling functionality like this will not be identical + // to actually declaring methods on the time types since embedding of the + // time types will not be able to forward this functionality. 
+ switch t { + case timeDurationType: + fncs.nonDefault = true + marshalNanos := fncs.marshal + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + if mo.format == "nanos" { + mo.format = "" + return marshalNanos(mo, enc, va) + } else { + return newInvalidFormatError("marshal", t, mo.format) + } + } + + td := va.Interface().(time.Duration) + b := enc.UnusedBuffer() + b = append(b, '"') + b = append(b, td.String()...) // never contains special characters + b = append(b, '"') + return enc.WriteValue(b) + } + unmarshalNanos := fncs.unmarshal + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + // TODO: Should there be a flag that specifies that we can unmarshal + // from either form since there would be no ambiguity? + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + if uo.format == "nanos" { + uo.format = "" + return unmarshalNanos(uo, dec, va) + } else { + return newInvalidFormatError("unmarshal", t, uo.format) + } + } + + var flags valueFlags + td := va.Addr().Interface().(*time.Duration) + val, err := dec.readValue(&flags) + if err != nil { + return err + } + switch k := val.Kind(); k { + case 'n': + *td = time.Duration(0) + return nil + case '"': + val = unescapeStringMayCopy(val, flags.isVerbatim()) + td2, err := time.ParseDuration(string(val)) + if err != nil { + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + *td = td2 + return nil + default: + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + } + case timeTimeType: + fncs.nonDefault = true + fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { + format := time.RFC3339Nano + isRFC3339 := true + if mo.format != "" && mo.formatDepth == enc.tokens.depth() { + var err error + format, isRFC3339, err = checkTimeFormat(mo.format) + if err != nil { + return &SemanticError{action: "marshal", GoType: t, Err: err} + } + } + + tt := va.Interface().(time.Time) + b := enc.UnusedBuffer() + b = append(b, '"') + b = tt.AppendFormat(b, format) + b = append(b, '"') + if isRFC3339 { + // Not all Go timestamps can be represented as valid RFC 3339. + // Explicitly check for these edge cases. + // See https://go.dev/issue/4556 and https://go.dev/issue/54580. + var err error + switch b := b[len(`"`) : len(b)-len(`"`)]; { + case b[len("9999")] != '-': // year must be exactly 4 digits wide + err = errors.New("year outside of range [0,9999]") + case b[len(b)-1] != 'Z': + c := b[len(b)-len("Z07:00")] + if ('0' <= c && c <= '9') || parseDec2(b[len(b)-len("07:00"):]) >= 24 { + err = errors.New("timezone hour outside of range [0,23]") + } + } + if err != nil { + return &SemanticError{action: "marshal", GoType: t, Err: err} + } + return enc.WriteValue(b) // RFC 3339 never needs JSON escaping + } + // The format may contain special characters that need escaping. + // Verify that the result is a valid JSON string (common case), + // otherwise escape the string correctly (slower case). 
+ if consumeSimpleString(b) != len(b) { + b, _ = appendString(nil, string(b[len(`"`):len(b)-len(`"`)]), true, nil) + } + return enc.WriteValue(b) + } + fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { + format := time.RFC3339 + isRFC3339 := true + if uo.format != "" && uo.formatDepth == dec.tokens.depth() { + var err error + format, isRFC3339, err = checkTimeFormat(uo.format) + if err != nil { + return &SemanticError{action: "unmarshal", GoType: t, Err: err} + } + } + + var flags valueFlags + tt := va.Addr().Interface().(*time.Time) + val, err := dec.readValue(&flags) + if err != nil { + return err + } + k := val.Kind() + switch k { + case 'n': + *tt = time.Time{} + return nil + case '"': + val = unescapeStringMayCopy(val, flags.isVerbatim()) + tt2, err := time.Parse(format, string(val)) + if isRFC3339 && err == nil { + // TODO(https://go.dev/issue/54580): RFC 3339 specifies + // the exact grammar of a valid timestamp. However, + // the parsing functionality in "time" is too loose and + // incorrectly accepts invalid timestamps as valid. + // Remove these manual checks when "time" checks it for us. + newParseError := func(layout, value, layoutElem, valueElem, message string) error { + return &time.ParseError{Layout: layout, Value: value, LayoutElem: layoutElem, ValueElem: valueElem, Message: message} + } + switch { + case val[len("2006-01-02T")+1] == ':': // hour must be two digits + err = newParseError(format, string(val), "15", string(val[len("2006-01-02T"):][:1]), "") + case val[len("2006-01-02T15:04:05")] == ',': // sub-second separator must be a period + err = newParseError(format, string(val), ".", ",", "") + case val[len(val)-1] != 'Z': + switch { + case parseDec2(val[len(val)-len("07:00"):]) >= 24: // timezone hour must be in range + err = newParseError(format, string(val), "Z07:00", string(val[len(val)-len("Z07:00"):]), ": timezone hour out of range") + case parseDec2(val[len(val)-len("00"):]) >= 60: // timezone minute must be in range + err = newParseError(format, string(val), "Z07:00", string(val[len(val)-len("Z07:00"):]), ": timezone minute out of range") + } + } + } + if err != nil { + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} + } + *tt = tt2 + return nil + default: + return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} + } + } + } + return fncs +} + +func checkTimeFormat(format string) (string, bool, error) { + // We assume that an exported constant in the time package will + // always start with an uppercase ASCII letter. 
+ if len(format) > 0 && 'A' <= format[0] && format[0] <= 'Z' { + switch format { + case "ANSIC": + return time.ANSIC, false, nil + case "UnixDate": + return time.UnixDate, false, nil + case "RubyDate": + return time.RubyDate, false, nil + case "RFC822": + return time.RFC822, false, nil + case "RFC822Z": + return time.RFC822Z, false, nil + case "RFC850": + return time.RFC850, false, nil + case "RFC1123": + return time.RFC1123, false, nil + case "RFC1123Z": + return time.RFC1123Z, false, nil + case "RFC3339": + return time.RFC3339, true, nil + case "RFC3339Nano": + return time.RFC3339Nano, true, nil + case "Kitchen": + return time.Kitchen, false, nil + case "Stamp": + return time.Stamp, false, nil + case "StampMilli": + return time.StampMilli, false, nil + case "StampMicro": + return time.StampMicro, false, nil + case "StampNano": + return time.StampNano, false, nil + default: + // Reject any format that is an exported Go identifier in case + // new format constants are added to the time package. + if strings.TrimFunc(format, isLetterOrDigit) == "" { + return "", false, fmt.Errorf("undefined format layout: %v", format) + } + } + } + return format, false, nil +} + +// parseDec2 parses b as an unsigned, base-10, 2-digit number. +// It panics if len(b) < 2. The result is undefined if digits are not base-10. +func parseDec2(b []byte) byte { + return 10*(b[0]-'0') + (b[1] - '0') +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go new file mode 100644 index 000000000..0d68b3233 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go @@ -0,0 +1,1655 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "errors" + "io" + "math" + "strconv" + "unicode/utf16" + "unicode/utf8" +) + +// NOTE: The logic for decoding is complicated by the fact that reading from +// an io.Reader into a temporary buffer means that the buffer may contain a +// truncated portion of some valid input, requiring the need to fetch more data. +// +// This file is structured in the following way: +// +// - consumeXXX functions parse an exact JSON token from a []byte. +// If the buffer appears truncated, then it returns io.ErrUnexpectedEOF. +// The consumeSimpleXXX functions are so named because they only handle +// a subset of the grammar for the JSON token being parsed. +// They do not handle the full grammar to keep these functions inlineable. +// +// - Decoder.consumeXXX methods parse the next JSON token from Decoder.buf, +// automatically fetching more input if necessary. These methods take +// a position relative to the start of Decoder.buf as an argument and +// return the end of the consumed JSON token as a position, +// also relative to the start of Decoder.buf. +// +// - In the event of an I/O errors or state machine violations, +// the implementation avoids mutating the state of Decoder +// (aside from the book-keeping needed to implement Decoder.fetch). +// For this reason, only Decoder.ReadToken and Decoder.ReadValue are +// responsible for updated Decoder.prevStart and Decoder.prevEnd. 
+//
+// - For performance, much of the implementation uses the pattern of calling
+// the inlineable consumeXXX functions first, and if more work is necessary,
+// then it calls the slower Decoder.consumeXXX methods.
+// TODO: Revisit this pattern if the Go compiler provides finer control
+// over exactly which calls are inlined or not.
+
+// DecodeOptions configures how JSON decoding operates.
+// The zero value is equivalent to the default settings,
+// which is compliant with both RFC 7493 and RFC 8259.
+type DecodeOptions struct {
+	requireKeyedLiterals
+	nonComparable
+
+	// AllowDuplicateNames specifies that JSON objects may contain
+	// duplicate member names. Disabling the duplicate name check may provide
+	// computational and performance benefits, but breaks compliance with
+	// RFC 7493, section 2.3. The input will still be compliant with RFC 8259,
+	// which leaves the handling of duplicate names as unspecified behavior.
+	AllowDuplicateNames bool
+
+	// AllowInvalidUTF8 specifies that JSON strings may contain invalid UTF-8,
+	// which will be mangled as the Unicode replacement character, U+FFFD.
+	// This causes the decoder to break compliance with
+	// RFC 7493, section 2.1, and RFC 8259, section 8.1.
+	AllowInvalidUTF8 bool
+}
+
+// Decoder is a streaming decoder for raw JSON tokens and values.
+// It is used to read a stream of top-level JSON values,
+// each separated by optional whitespace characters.
+//
+// ReadToken and ReadValue calls may be interleaved.
+// For example, the following JSON value:
+//
+//	{"name":"value","array":[null,false,true,3.14159],"object":{"k":"v"}}
+//
+// can be parsed with the following calls (ignoring errors for brevity):
+//
+//	d.ReadToken() // {
+//	d.ReadToken() // "name"
+//	d.ReadToken() // "value"
+//	d.ReadValue() // "array"
+//	d.ReadToken() // [
+//	d.ReadToken() // null
+//	d.ReadToken() // false
+//	d.ReadValue() // true
+//	d.ReadToken() // 3.14159
+//	d.ReadToken() // ]
+//	d.ReadValue() // "object"
+//	d.ReadValue() // {"k":"v"}
+//	d.ReadToken() // }
+//
+// The above is one of many possible sequences of calls and
+// may not represent the most sensible method to call for any given token/value.
+// For example, it is probably more common to call ReadToken to obtain a
+// string token for object names.
+type Decoder struct {
+	state
+	decodeBuffer
+	options DecodeOptions
+
+	stringCache *stringCache // only used when unmarshaling
+}
+
+// decodeBuffer is a buffer split into 4 segments:
+//
+// - buf[0:prevEnd] // already read portion of the buffer
+// - buf[prevStart:prevEnd] // previously read value
+// - buf[prevEnd:len(buf)] // unread portion of the buffer
+// - buf[len(buf):cap(buf)] // unused portion of the buffer
+//
+// Invariants:
+//
+//	0 ≤ prevStart ≤ prevEnd ≤ len(buf) ≤ cap(buf)
+type decodeBuffer struct {
+	peekPos int // non-zero if valid offset into buf for start of next token
+	peekErr error // implies peekPos is -1
+
+	buf []byte // may alias rd if it is a bytes.Buffer
+	prevStart int
+	prevEnd int
+
+	// baseOffset is added to prevStart and prevEnd to obtain
+	// the absolute offset relative to the start of io.Reader stream.
+	baseOffset int64
+
+	rd io.Reader
+}
+
+// NewDecoder constructs a new streaming decoder reading from r.
+//
+// If r is a bytes.Buffer, then the decoder parses directly from the buffer
+// without first copying the contents to an intermediate buffer.
+// Additional writes to the buffer must not occur while the decoder is in use.
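+//
+// A minimal usage sketch (illustrative only, not upstream documentation;
+// assumes the strings package and elides real error handling):
+//
+//	d := NewDecoder(strings.NewReader(`{"k":"v"}`))
+//	for {
+//		tok, err := d.ReadToken()
+//		if err != nil {
+//			break // io.EOF once `{`, `"k"`, `"v"`, and `}` have been read
+//		}
+//		_ = tok
+//	}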
+func NewDecoder(r io.Reader) *Decoder {
+	return DecodeOptions{}.NewDecoder(r)
+}
+
+// NewDecoder constructs a new streaming decoder reading from r
+// configured with the provided options.
+func (o DecodeOptions) NewDecoder(r io.Reader) *Decoder {
+	d := new(Decoder)
+	o.ResetDecoder(d, r)
+	return d
+}
+
+// ResetDecoder resets a decoder such that it is reading afresh from r and
+// configured with the provided options.
+func (o DecodeOptions) ResetDecoder(d *Decoder, r io.Reader) {
+	if d == nil {
+		panic("json: invalid nil Decoder")
+	}
+	if r == nil {
+		panic("json: invalid nil io.Reader")
+	}
+	d.reset(nil, r, o)
+}
+
+func (d *Decoder) reset(b []byte, r io.Reader, o DecodeOptions) {
+	d.state.reset()
+	d.decodeBuffer = decodeBuffer{buf: b, rd: r}
+	d.options = o
+}
+
+// Reset resets a decoder such that it is reading afresh from r but
+// keeps any pre-existing decoder options.
+func (d *Decoder) Reset(r io.Reader) {
+	d.options.ResetDecoder(d, r)
+}
+
+var errBufferWriteAfterNext = errors.New("invalid bytes.Buffer.Write call after calling bytes.Buffer.Next")
+
+// fetch reads at least 1 byte from the underlying io.Reader.
+// It returns io.ErrUnexpectedEOF if zero bytes were read and io.EOF was seen.
+func (d *Decoder) fetch() error {
+	if d.rd == nil {
+		return io.ErrUnexpectedEOF
+	}
+
+	// Inform objectNameStack that we are about to fetch new buffer content.
+	d.names.copyQuotedBuffer(d.buf)
+
+	// Specialize bytes.Buffer for better performance.
+	if bb, ok := d.rd.(*bytes.Buffer); ok {
+		switch {
+		case bb.Len() == 0:
+			return io.ErrUnexpectedEOF
+		case len(d.buf) == 0:
+			d.buf = bb.Next(bb.Len()) // "read" all data in the buffer
+			return nil
+		default:
+			// This only occurs if a partially filled bytes.Buffer was provided
+			// and more data is written to it while Decoder is reading from it.
+			// This practice will lead to data corruption since future writes
+			// may overwrite the contents of the current buffer.
+			//
+			// The user is trying to use a bytes.Buffer as a pipe,
+			// but a bytes.Buffer is a poor implementation of a pipe;
+			// the purpose-built io.Pipe should be used instead.
+			return &ioError{action: "read", err: errBufferWriteAfterNext}
+		}
+	}
+
+	// Allocate initial buffer if empty.
+	if cap(d.buf) == 0 {
+		d.buf = make([]byte, 0, 64)
+	}
+
+	// Check whether to grow the buffer.
+	const maxBufferSize = 4 << 10
+	const growthSizeFactor = 2 // higher value is faster
+	const growthRateFactor = 2 // higher value is slower
+	// By default, grow if below the maximum buffer size.
+	grow := cap(d.buf) <= maxBufferSize/growthSizeFactor
+	// Growing can be expensive, so only grow
+	// if a sufficient number of bytes have been processed.
+	grow = grow && int64(cap(d.buf)) < d.previousOffsetEnd()/growthRateFactor
+	// If prevStart==0, then fetch was called in order to fetch more data
+	// to finish consuming a large JSON value contiguously.
+	// Grow if less than 25% of the remaining capacity is available.
+	// Note that this may cause the input buffer to exceed maxBufferSize.
+	grow = grow || (d.prevStart == 0 && len(d.buf) >= 3*cap(d.buf)/4)
+
+	if grow {
+		// Allocate a new buffer and copy the contents of the old buffer over.
+		// TODO: Provide a hard limit on the maximum internal buffer size?
+		buf := make([]byte, 0, cap(d.buf)*growthSizeFactor)
+		d.buf = append(buf, d.buf[d.prevStart:]...)
+	} else {
+		// Move unread portion of the data to the front.
+		n := copy(d.buf[:cap(d.buf)], d.buf[d.prevStart:])
+		d.buf = d.buf[:n]
+	}
+	d.baseOffset += int64(d.prevStart)
+	d.prevEnd -= d.prevStart
+	d.prevStart = 0
+
+	// Read more data into the internal buffer.
+	for {
+		n, err := d.rd.Read(d.buf[len(d.buf):cap(d.buf)])
+		switch {
+		case n > 0:
+			d.buf = d.buf[:len(d.buf)+n]
+			return nil // ignore errors if any bytes are read
+		case err == io.EOF:
+			return io.ErrUnexpectedEOF
+		case err != nil:
+			return &ioError{action: "read", err: err}
+		default:
+			continue // Read returned (0, nil)
+		}
+	}
+}
+
+const invalidateBufferByte = '#' // invalid starting character for JSON grammar
+
+// invalidatePreviousRead invalidates buffers returned by Peek and Read calls
+// so that the first byte is an invalid character.
+// This Hyrum-proofs the API against faulty application code that assumes
+// values returned by ReadValue remain valid past subsequent Read calls.
+func (d *decodeBuffer) invalidatePreviousRead() {
+	// Avoid mutating the buffer if d.rd is nil which implies that d.buf
+	// is provided by the user code and may not expect mutations.
+	isBytesBuffer := func(r io.Reader) bool {
+		_, ok := r.(*bytes.Buffer)
+		return ok
+	}
+	if d.rd != nil && !isBytesBuffer(d.rd) && d.prevStart < d.prevEnd && uint(d.prevStart) < uint(len(d.buf)) {
+		d.buf[d.prevStart] = invalidateBufferByte
+		d.prevStart = d.prevEnd
+	}
+}
+
+// needMore reports whether there are no more unread bytes.
+func (d *decodeBuffer) needMore(pos int) bool {
+	// NOTE: The arguments and logic are kept simple to keep this inlineable.
+	return pos == len(d.buf)
+}
+
+// injectSyntacticErrorWithPosition wraps a SyntacticError with the position,
+// otherwise it returns the error as is.
+// It takes a position relative to the start of d.buf.
+func (d *decodeBuffer) injectSyntacticErrorWithPosition(err error, pos int) error {
+	if serr, ok := err.(*SyntacticError); ok {
+		return serr.withOffset(d.baseOffset + int64(pos))
+	}
+	return err
+}
+
+func (d *decodeBuffer) previousOffsetStart() int64 { return d.baseOffset + int64(d.prevStart) }
+func (d *decodeBuffer) previousOffsetEnd() int64 { return d.baseOffset + int64(d.prevEnd) }
+func (d *decodeBuffer) previousBuffer() []byte { return d.buf[d.prevStart:d.prevEnd] }
+func (d *decodeBuffer) unreadBuffer() []byte { return d.buf[d.prevEnd:len(d.buf)] }
+
+// PeekKind retrieves the next token kind, but does not advance the read offset.
+// It returns 0 if there are no more tokens.
+func (d *Decoder) PeekKind() Kind {
+	// Check whether we have a cached peek result.
+	if d.peekPos > 0 {
+		return Kind(d.buf[d.peekPos]).normalize()
+	}
+
+	var err error
+	d.invalidatePreviousRead()
+	pos := d.prevEnd
+
+	// Consume leading whitespace.
+	pos += consumeWhitespace(d.buf[pos:])
+	if d.needMore(pos) {
+		if pos, err = d.consumeWhitespace(pos); err != nil {
+			if err == io.ErrUnexpectedEOF && d.tokens.depth() == 1 {
+				err = io.EOF // EOF possibly if no Tokens present after top-level value
+			}
+			d.peekPos, d.peekErr = -1, err
+			return invalidKind
+		}
+	}
+
+	// Consume colon or comma.
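+	// (Illustrative note: for input such as `{"a":1,"b":2}`, the ':' after an
+	// object name or the ',' between members is consumed here, and the
+	// checkDelim call below reports a SyntacticError for a missing or
+	// misplaced delimiter.)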
+ var delim byte + if c := d.buf[pos]; c == ':' || c == ',' { + delim = c + pos += 1 + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + d.peekPos, d.peekErr = -1, err + return invalidKind + } + } + } + next := Kind(d.buf[pos]).normalize() + if d.tokens.needDelim(next) != delim { + pos = d.prevEnd // restore position to right after leading whitespace + pos += consumeWhitespace(d.buf[pos:]) + err = d.tokens.checkDelim(delim, next) + err = d.injectSyntacticErrorWithPosition(err, pos) + d.peekPos, d.peekErr = -1, err + return invalidKind + } + + // This may set peekPos to zero, which is indistinguishable from + // the uninitialized state. While a small hit to performance, it is correct + // since ReadValue and ReadToken will disregard the cached result and + // recompute the next kind. + d.peekPos, d.peekErr = pos, nil + return next +} + +// SkipValue is semantically equivalent to calling ReadValue and discarding +// the result except that memory is not wasted trying to hold the entire result. +func (d *Decoder) SkipValue() error { + switch d.PeekKind() { + case '{', '[': + // For JSON objects and arrays, keep skipping all tokens + // until the depth matches the starting depth. + depth := d.tokens.depth() + for { + if _, err := d.ReadToken(); err != nil { + return err + } + if depth >= d.tokens.depth() { + return nil + } + } + default: + // Trying to skip a value when the next token is a '}' or ']' + // will result in an error being returned here. + if _, err := d.ReadValue(); err != nil { + return err + } + return nil + } +} + +// ReadToken reads the next Token, advancing the read offset. +// The returned token is only valid until the next Peek, Read, or Skip call. +// It returns io.EOF if there are no more tokens. +func (d *Decoder) ReadToken() (Token, error) { + // Determine the next kind. + var err error + var next Kind + pos := d.peekPos + if pos != 0 { + // Use cached peek result. + if d.peekErr != nil { + err := d.peekErr + d.peekPos, d.peekErr = 0, nil // possibly a transient I/O error + return Token{}, err + } + next = Kind(d.buf[pos]).normalize() + d.peekPos = 0 // reset cache + } else { + d.invalidatePreviousRead() + pos = d.prevEnd + + // Consume leading whitespace. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + if err == io.ErrUnexpectedEOF && d.tokens.depth() == 1 { + err = io.EOF // EOF possibly if no Tokens present after top-level value + } + return Token{}, err + } + } + + // Consume colon or comma. + var delim byte + if c := d.buf[pos]; c == ':' || c == ',' { + delim = c + pos += 1 + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return Token{}, err + } + } + } + next = Kind(d.buf[pos]).normalize() + if d.tokens.needDelim(next) != delim { + pos = d.prevEnd // restore position to right after leading whitespace + pos += consumeWhitespace(d.buf[pos:]) + err = d.tokens.checkDelim(delim, next) + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + } + + // Handle the next token. 
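+	// (Illustrative note: `next` is the kind normalized from the token's
+	// first byte: 'n', 'f', or 't' for the literals, '"' for strings, '0'
+	// for any number, and '{', '}', '[', ']' for the structural tokens
+	// handled in the switch below.)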
+ var n int + switch next { + case 'n': + if consumeNull(d.buf[pos:]) == 0 { + pos, err = d.consumeLiteral(pos, "null") + if err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + } else { + pos += len("null") + } + if err = d.tokens.appendLiteral(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos-len("null")) // report position at start of literal + } + d.prevStart, d.prevEnd = pos, pos + return Null, nil + + case 'f': + if consumeFalse(d.buf[pos:]) == 0 { + pos, err = d.consumeLiteral(pos, "false") + if err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + } else { + pos += len("false") + } + if err = d.tokens.appendLiteral(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos-len("false")) // report position at start of literal + } + d.prevStart, d.prevEnd = pos, pos + return False, nil + + case 't': + if consumeTrue(d.buf[pos:]) == 0 { + pos, err = d.consumeLiteral(pos, "true") + if err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + } else { + pos += len("true") + } + if err = d.tokens.appendLiteral(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos-len("true")) // report position at start of literal + } + d.prevStart, d.prevEnd = pos, pos + return True, nil + + case '"': + var flags valueFlags // TODO: Preserve this in Token? + if n = consumeSimpleString(d.buf[pos:]); n == 0 { + oldAbsPos := d.baseOffset + int64(pos) + pos, err = d.consumeString(&flags, pos) + newAbsPos := d.baseOffset + int64(pos) + n = int(newAbsPos - oldAbsPos) + if err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + } else { + pos += n + } + if !d.options.AllowDuplicateNames && d.tokens.last.needObjectName() { + if !d.tokens.last.isValidNamespace() { + return Token{}, errInvalidNamespace + } + if d.tokens.last.isActiveNamespace() && !d.namespaces.last().insertQuoted(d.buf[pos-n:pos], flags.isVerbatim()) { + err = &SyntacticError{str: "duplicate name " + string(d.buf[pos-n:pos]) + " in object"} + return Token{}, d.injectSyntacticErrorWithPosition(err, pos-n) // report position at start of string + } + d.names.replaceLastQuotedOffset(pos - n) // only replace if insertQuoted succeeds + } + if err = d.tokens.appendString(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos-n) // report position at start of string + } + d.prevStart, d.prevEnd = pos-n, pos + return Token{raw: &d.decodeBuffer, num: uint64(d.previousOffsetStart())}, nil + + case '0': + // NOTE: Since JSON numbers are not self-terminating, + // we need to make sure that the next byte is not part of a number. 
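+		// (For example, a buffer that currently ends in `12` may still be
+		// the prefix of `123` or `12.5`; consumeNumber below fetches more
+		// input before the token can be treated as complete.)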
+ if n = consumeSimpleNumber(d.buf[pos:]); n == 0 || d.needMore(pos+n) { + oldAbsPos := d.baseOffset + int64(pos) + pos, err = d.consumeNumber(pos) + newAbsPos := d.baseOffset + int64(pos) + n = int(newAbsPos - oldAbsPos) + if err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + } else { + pos += n + } + if err = d.tokens.appendNumber(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos-n) // report position at start of number + } + d.prevStart, d.prevEnd = pos-n, pos + return Token{raw: &d.decodeBuffer, num: uint64(d.previousOffsetStart())}, nil + + case '{': + if err = d.tokens.pushObject(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + if !d.options.AllowDuplicateNames { + d.names.push() + d.namespaces.push() + } + pos += 1 + d.prevStart, d.prevEnd = pos, pos + return ObjectStart, nil + + case '}': + if err = d.tokens.popObject(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + if !d.options.AllowDuplicateNames { + d.names.pop() + d.namespaces.pop() + } + pos += 1 + d.prevStart, d.prevEnd = pos, pos + return ObjectEnd, nil + + case '[': + if err = d.tokens.pushArray(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + pos += 1 + d.prevStart, d.prevEnd = pos, pos + return ArrayStart, nil + + case ']': + if err = d.tokens.popArray(); err != nil { + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } + pos += 1 + d.prevStart, d.prevEnd = pos, pos + return ArrayEnd, nil + + default: + err = newInvalidCharacterError(d.buf[pos:], "at start of token") + return Token{}, d.injectSyntacticErrorWithPosition(err, pos) + } +} + +type valueFlags uint + +const ( + _ valueFlags = (1 << iota) / 2 // powers of two starting with zero + + stringNonVerbatim // string cannot be naively treated as valid UTF-8 + stringNonCanonical // string not formatted according to RFC 8785, section 3.2.2.2. + // TODO: Track whether a number is a non-integer? +) + +func (f *valueFlags) set(f2 valueFlags) { *f |= f2 } +func (f valueFlags) isVerbatim() bool { return f&stringNonVerbatim == 0 } +func (f valueFlags) isCanonical() bool { return f&stringNonCanonical == 0 } + +// ReadValue returns the next raw JSON value, advancing the read offset. +// The value is stripped of any leading or trailing whitespace. +// The returned value is only valid until the next Peek, Read, or Skip call and +// may not be mutated while the Decoder remains in use. +// If the decoder is currently at the end token for an object or array, +// then it reports a SyntacticError and the internal state remains unchanged. +// It returns io.EOF if there are no more values. +func (d *Decoder) ReadValue() (RawValue, error) { + var flags valueFlags + return d.readValue(&flags) +} +func (d *Decoder) readValue(flags *valueFlags) (RawValue, error) { + // Determine the next kind. + var err error + var next Kind + pos := d.peekPos + if pos != 0 { + // Use cached peek result. + if d.peekErr != nil { + err := d.peekErr + d.peekPos, d.peekErr = 0, nil // possibly a transient I/O error + return nil, err + } + next = Kind(d.buf[pos]).normalize() + d.peekPos = 0 // reset cache + } else { + d.invalidatePreviousRead() + pos = d.prevEnd + + // Consume leading whitespace. 
+ pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + if err == io.ErrUnexpectedEOF && d.tokens.depth() == 1 { + err = io.EOF // EOF possibly if no Tokens present after top-level value + } + return nil, err + } + } + + // Consume colon or comma. + var delim byte + if c := d.buf[pos]; c == ':' || c == ',' { + delim = c + pos += 1 + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return nil, err + } + } + } + next = Kind(d.buf[pos]).normalize() + if d.tokens.needDelim(next) != delim { + pos = d.prevEnd // restore position to right after leading whitespace + pos += consumeWhitespace(d.buf[pos:]) + err = d.tokens.checkDelim(delim, next) + return nil, d.injectSyntacticErrorWithPosition(err, pos) + } + } + + // Handle the next value. + oldAbsPos := d.baseOffset + int64(pos) + pos, err = d.consumeValue(flags, pos) + newAbsPos := d.baseOffset + int64(pos) + n := int(newAbsPos - oldAbsPos) + if err != nil { + return nil, d.injectSyntacticErrorWithPosition(err, pos) + } + switch next { + case 'n', 't', 'f': + err = d.tokens.appendLiteral() + case '"': + if !d.options.AllowDuplicateNames && d.tokens.last.needObjectName() { + if !d.tokens.last.isValidNamespace() { + err = errInvalidNamespace + break + } + if d.tokens.last.isActiveNamespace() && !d.namespaces.last().insertQuoted(d.buf[pos-n:pos], flags.isVerbatim()) { + err = &SyntacticError{str: "duplicate name " + string(d.buf[pos-n:pos]) + " in object"} + break + } + d.names.replaceLastQuotedOffset(pos - n) // only replace if insertQuoted succeeds + } + err = d.tokens.appendString() + case '0': + err = d.tokens.appendNumber() + case '{': + if err = d.tokens.pushObject(); err != nil { + break + } + if err = d.tokens.popObject(); err != nil { + panic("BUG: popObject should never fail immediately after pushObject: " + err.Error()) + } + case '[': + if err = d.tokens.pushArray(); err != nil { + break + } + if err = d.tokens.popArray(); err != nil { + panic("BUG: popArray should never fail immediately after pushArray: " + err.Error()) + } + } + if err != nil { + return nil, d.injectSyntacticErrorWithPosition(err, pos-n) // report position at start of value + } + d.prevEnd = pos + d.prevStart = pos - n + return d.buf[pos-n : pos : pos], nil +} + +// checkEOF verifies that the input has no more data. +func (d *Decoder) checkEOF() error { + switch pos, err := d.consumeWhitespace(d.prevEnd); err { + case nil: + return newInvalidCharacterError(d.buf[pos:], "after top-level value") + case io.ErrUnexpectedEOF: + return nil + default: + return err + } +} + +// consumeWhitespace consumes all whitespace starting at d.buf[pos:]. +// It returns the new position in d.buf immediately after the last whitespace. +// If it returns nil, there is guaranteed to at least be one unread byte. +// +// The following pattern is common in this implementation: +// +// pos += consumeWhitespace(d.buf[pos:]) +// if d.needMore(pos) { +// if pos, err = d.consumeWhitespace(pos); err != nil { +// return ... +// } +// } +// +// It is difficult to simplify this without sacrificing performance since +// consumeWhitespace must be inlined. The body of the if statement is +// executed only in rare situations where we need to fetch more data. +// Since fetching may return an error, we also need to check the error. 
+func (d *Decoder) consumeWhitespace(pos int) (newPos int, err error) { + for { + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + absPos := d.baseOffset + int64(pos) + err = d.fetch() // will mutate d.buf and invalidate pos + pos = int(absPos - d.baseOffset) + if err != nil { + return pos, err + } + continue + } + return pos, nil + } +} + +// consumeValue consumes a single JSON value starting at d.buf[pos:]. +// It returns the new position in d.buf immediately after the value. +func (d *Decoder) consumeValue(flags *valueFlags, pos int) (newPos int, err error) { + for { + var n int + var err error + switch next := Kind(d.buf[pos]).normalize(); next { + case 'n': + if n = consumeNull(d.buf[pos:]); n == 0 { + n, err = consumeLiteral(d.buf[pos:], "null") + } + case 'f': + if n = consumeFalse(d.buf[pos:]); n == 0 { + n, err = consumeLiteral(d.buf[pos:], "false") + } + case 't': + if n = consumeTrue(d.buf[pos:]); n == 0 { + n, err = consumeLiteral(d.buf[pos:], "true") + } + case '"': + if n = consumeSimpleString(d.buf[pos:]); n == 0 { + return d.consumeString(flags, pos) + } + case '0': + // NOTE: Since JSON numbers are not self-terminating, + // we need to make sure that the next byte is not part of a number. + if n = consumeSimpleNumber(d.buf[pos:]); n == 0 || d.needMore(pos+n) { + return d.consumeNumber(pos) + } + case '{': + return d.consumeObject(flags, pos) + case '[': + return d.consumeArray(flags, pos) + default: + return pos, newInvalidCharacterError(d.buf[pos:], "at start of value") + } + if err == io.ErrUnexpectedEOF { + absPos := d.baseOffset + int64(pos) + err = d.fetch() // will mutate d.buf and invalidate pos + pos = int(absPos - d.baseOffset) + if err != nil { + return pos, err + } + continue + } + return pos + n, err + } +} + +// consumeLiteral consumes a single JSON literal starting at d.buf[pos:]. +// It returns the new position in d.buf immediately after the literal. +func (d *Decoder) consumeLiteral(pos int, lit string) (newPos int, err error) { + for { + n, err := consumeLiteral(d.buf[pos:], lit) + if err == io.ErrUnexpectedEOF { + absPos := d.baseOffset + int64(pos) + err = d.fetch() // will mutate d.buf and invalidate pos + pos = int(absPos - d.baseOffset) + if err != nil { + return pos, err + } + continue + } + return pos + n, err + } +} + +// consumeString consumes a single JSON string starting at d.buf[pos:]. +// It returns the new position in d.buf immediately after the string. +func (d *Decoder) consumeString(flags *valueFlags, pos int) (newPos int, err error) { + var n int + for { + n, err = consumeStringResumable(flags, d.buf[pos:], n, !d.options.AllowInvalidUTF8) + if err == io.ErrUnexpectedEOF { + absPos := d.baseOffset + int64(pos) + err = d.fetch() // will mutate d.buf and invalidate pos + pos = int(absPos - d.baseOffset) + if err != nil { + return pos, err + } + continue + } + return pos + n, err + } +} + +// consumeNumber consumes a single JSON number starting at d.buf[pos:]. +// It returns the new position in d.buf immediately after the number. +func (d *Decoder) consumeNumber(pos int) (newPos int, err error) { + var n int + var state consumeNumberState + for { + n, state, err = consumeNumberResumable(d.buf[pos:], n, state) + // NOTE: Since JSON numbers are not self-terminating, + // we need to make sure that the next byte is not part of a number. 
+ if err == io.ErrUnexpectedEOF || d.needMore(pos+n) { + mayTerminate := err == nil + absPos := d.baseOffset + int64(pos) + err = d.fetch() // will mutate d.buf and invalidate pos + pos = int(absPos - d.baseOffset) + if err != nil { + if mayTerminate && err == io.ErrUnexpectedEOF { + return pos + n, nil + } + return pos, err + } + continue + } + return pos + n, err + } +} + +// consumeObject consumes a single JSON object starting at d.buf[pos:]. +// It returns the new position in d.buf immediately after the object. +func (d *Decoder) consumeObject(flags *valueFlags, pos int) (newPos int, err error) { + var n int + var names *objectNamespace + if !d.options.AllowDuplicateNames { + d.namespaces.push() + defer d.namespaces.pop() + names = d.namespaces.last() + } + + // Handle before start. + if d.buf[pos] != '{' { + panic("BUG: consumeObject must be called with a buffer that starts with '{'") + } + pos++ + + // Handle after start. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return pos, err + } + } + if d.buf[pos] == '}' { + pos++ + return pos, nil + } + + for { + // Handle before name. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return pos, err + } + } + var flags2 valueFlags + if n = consumeSimpleString(d.buf[pos:]); n == 0 { + oldAbsPos := d.baseOffset + int64(pos) + pos, err = d.consumeString(&flags2, pos) + newAbsPos := d.baseOffset + int64(pos) + n = int(newAbsPos - oldAbsPos) + flags.set(flags2) + if err != nil { + return pos, err + } + } else { + pos += n + } + if !d.options.AllowDuplicateNames && !names.insertQuoted(d.buf[pos-n:pos], flags2.isVerbatim()) { + return pos - n, &SyntacticError{str: "duplicate name " + string(d.buf[pos-n:pos]) + " in object"} + } + + // Handle after name. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return pos, err + } + } + if d.buf[pos] != ':' { + return pos, newInvalidCharacterError(d.buf[pos:], "after object name (expecting ':')") + } + pos++ + + // Handle before value. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return pos, err + } + } + pos, err = d.consumeValue(flags, pos) + if err != nil { + return pos, err + } + + // Handle after value. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return pos, err + } + } + switch d.buf[pos] { + case ',': + pos++ + continue + case '}': + pos++ + return pos, nil + default: + return pos, newInvalidCharacterError(d.buf[pos:], "after object value (expecting ',' or '}')") + } + } +} + +// consumeArray consumes a single JSON array starting at d.buf[pos:]. +// It returns the new position in d.buf immediately after the array. +func (d *Decoder) consumeArray(flags *valueFlags, pos int) (newPos int, err error) { + // Handle before start. + if d.buf[pos] != '[' { + panic("BUG: consumeArray must be called with a buffer that starts with '['") + } + pos++ + + // Handle after start. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return pos, err + } + } + if d.buf[pos] == ']' { + pos++ + return pos, nil + } + + for { + // Handle before value. 
+ pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return pos, err + } + } + pos, err = d.consumeValue(flags, pos) + if err != nil { + return pos, err + } + + // Handle after value. + pos += consumeWhitespace(d.buf[pos:]) + if d.needMore(pos) { + if pos, err = d.consumeWhitespace(pos); err != nil { + return pos, err + } + } + switch d.buf[pos] { + case ',': + pos++ + continue + case ']': + pos++ + return pos, nil + default: + return pos, newInvalidCharacterError(d.buf[pos:], "after array value (expecting ',' or ']')") + } + } +} + +// InputOffset returns the current input byte offset. It gives the location +// of the next byte immediately after the most recently returned token or value. +// The number of bytes actually read from the underlying io.Reader may be more +// than this offset due to internal buffering effects. +func (d *Decoder) InputOffset() int64 { + return d.previousOffsetEnd() +} + +// UnreadBuffer returns the data remaining in the unread buffer, +// which may contain zero or more bytes. +// The returned buffer must not be mutated while Decoder continues to be used. +// The buffer contents are valid until the next Peek, Read, or Skip call. +func (d *Decoder) UnreadBuffer() []byte { + return d.unreadBuffer() +} + +// StackDepth returns the depth of the state machine for read JSON data. +// Each level on the stack represents a nested JSON object or array. +// It is incremented whenever an ObjectStart or ArrayStart token is encountered +// and decremented whenever an ObjectEnd or ArrayEnd token is encountered. +// The depth is zero-indexed, where zero represents the top-level JSON value. +func (d *Decoder) StackDepth() int { + // NOTE: Keep in sync with Encoder.StackDepth. + return d.tokens.depth() - 1 +} + +// StackIndex returns information about the specified stack level. +// It must be a number between 0 and StackDepth, inclusive. +// For each level, it reports the kind: +// +// - 0 for a level of zero, +// - '{' for a level representing a JSON object, and +// - '[' for a level representing a JSON array. +// +// It also reports the length of that JSON object or array. +// Each name and value in a JSON object is counted separately, +// so the effective number of members would be half the length. +// A complete JSON object must have an even length. +func (d *Decoder) StackIndex(i int) (Kind, int) { + // NOTE: Keep in sync with Encoder.StackIndex. + switch s := d.tokens.index(i); { + case i > 0 && s.isObject(): + return '{', s.length() + case i > 0 && s.isArray(): + return '[', s.length() + default: + return 0, s.length() + } +} + +// StackPointer returns a JSON Pointer (RFC 6901) to the most recently read value. +// Object names are only present if AllowDuplicateNames is false, otherwise +// object members are represented using their index within the object. +func (d *Decoder) StackPointer() string { + d.names.copyQuotedBuffer(d.buf) + return string(d.appendStackPointer(nil)) +} + +// consumeWhitespace consumes leading JSON whitespace per RFC 7159, section 2. +func consumeWhitespace(b []byte) (n int) { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + for len(b) > n && (b[n] == ' ' || b[n] == '\t' || b[n] == '\r' || b[n] == '\n') { + n++ + } + return n +} + +// consumeNull consumes the next JSON null literal per RFC 7159, section 3. +// It returns 0 if it is invalid, in which case consumeLiteral should be used. 
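+// For example (illustrative): consumeNull([]byte(`null,`)) returns 4, while
+// consumeNull([]byte(`nul`)) returns 0, in which case the caller falls back
+// to consumeLiteral, which can report io.ErrUnexpectedEOF for truncation.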
+func consumeNull(b []byte) int { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + const literal = "null" + if len(b) >= len(literal) && string(b[:len(literal)]) == literal { + return len(literal) + } + return 0 +} + +// consumeFalse consumes the next JSON false literal per RFC 7159, section 3. +// It returns 0 if it is invalid, in which case consumeLiteral should be used. +func consumeFalse(b []byte) int { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + const literal = "false" + if len(b) >= len(literal) && string(b[:len(literal)]) == literal { + return len(literal) + } + return 0 +} + +// consumeTrue consumes the next JSON true literal per RFC 7159, section 3. +// It returns 0 if it is invalid, in which case consumeLiteral should be used. +func consumeTrue(b []byte) int { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + const literal = "true" + if len(b) >= len(literal) && string(b[:len(literal)]) == literal { + return len(literal) + } + return 0 +} + +// consumeLiteral consumes the next JSON literal per RFC 7159, section 3. +// If the input appears truncated, it returns io.ErrUnexpectedEOF. +func consumeLiteral(b []byte, lit string) (n int, err error) { + for i := 0; i < len(b) && i < len(lit); i++ { + if b[i] != lit[i] { + return i, newInvalidCharacterError(b[i:], "within literal "+lit+" (expecting "+strconv.QuoteRune(rune(lit[i]))+")") + } + } + if len(b) < len(lit) { + return len(b), io.ErrUnexpectedEOF + } + return len(lit), nil +} + +// consumeSimpleString consumes the next JSON string per RFC 7159, section 7 +// but is limited to the grammar for an ASCII string without escape sequences. +// It returns 0 if it is invalid or more complicated than a simple string, +// in which case consumeString should be called. +func consumeSimpleString(b []byte) (n int) { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + if len(b) > 0 && b[0] == '"' { + n++ + for len(b) > n && (' ' <= b[n] && b[n] != '\\' && b[n] != '"' && b[n] < utf8.RuneSelf) { + n++ + } + if len(b) > n && b[n] == '"' { + n++ + return n + } + } + return 0 +} + +// consumeString consumes the next JSON string per RFC 7159, section 7. +// If validateUTF8 is false, then this allows the presence of invalid UTF-8 +// characters within the string itself. +// It reports the number of bytes consumed and whether an error was encountered. +// If the input appears truncated, it returns io.ErrUnexpectedEOF. +func consumeString(flags *valueFlags, b []byte, validateUTF8 bool) (n int, err error) { + return consumeStringResumable(flags, b, 0, validateUTF8) +} + +// consumeStringResumable is identical to consumeString but supports resuming +// from a previous call that returned io.ErrUnexpectedEOF. +func consumeStringResumable(flags *valueFlags, b []byte, resumeOffset int, validateUTF8 bool) (n int, err error) { + // Consume the leading double quote. + switch { + case resumeOffset > 0: + n = resumeOffset // already handled the leading quote + case uint(len(b)) == 0: + return n, io.ErrUnexpectedEOF + case b[0] == '"': + n++ + default: + return n, newInvalidCharacterError(b[n:], `at start of string (expecting '"')`) + } + + // Consume every character in the string. + for uint(len(b)) > uint(n) { + // Optimize for long sequences of unescaped characters. 
+ noEscape := func(c byte) bool { + return c < utf8.RuneSelf && ' ' <= c && c != '\\' && c != '"' + } + for uint(len(b)) > uint(n) && noEscape(b[n]) { + n++ + } + if uint(len(b)) <= uint(n) { + return n, io.ErrUnexpectedEOF + } + + // Check for terminating double quote. + if b[n] == '"' { + n++ + return n, nil + } + + switch r, rn := utf8.DecodeRune(b[n:]); { + // Handle UTF-8 encoded byte sequence. + // Due to specialized handling of ASCII above, we know that + // all normal sequences at this point must be 2 bytes or larger. + case rn > 1: + n += rn + // Handle escape sequence. + case r == '\\': + flags.set(stringNonVerbatim) + resumeOffset = n + if uint(len(b)) < uint(n+2) { + return resumeOffset, io.ErrUnexpectedEOF + } + switch r := b[n+1]; r { + case '/': + // Forward slash is the only character with 3 representations. + // Per RFC 8785, section 3.2.2.2., this must not be escaped. + flags.set(stringNonCanonical) + n += 2 + case '"', '\\', 'b', 'f', 'n', 'r', 't': + n += 2 + case 'u': + if uint(len(b)) < uint(n+6) { + if !hasEscapeSequencePrefix(b[n:]) { + flags.set(stringNonCanonical) + return n, &SyntacticError{str: "invalid escape sequence " + strconv.Quote(string(b[n:])) + " within string"} + } + return resumeOffset, io.ErrUnexpectedEOF + } + v1, ok := parseHexUint16(b[n+2 : n+6]) + if !ok { + flags.set(stringNonCanonical) + return n, &SyntacticError{str: "invalid escape sequence " + strconv.Quote(string(b[n:n+6])) + " within string"} + } + // Only certain control characters can use the \uFFFF notation + // for canonical formatting (per RFC 8785, section 3.2.2.2.). + switch v1 { + // \uFFFF notation not permitted for these characters. + case '\b', '\f', '\n', '\r', '\t': + flags.set(stringNonCanonical) + default: + // \uFFFF notation only permitted for control characters. + if v1 >= ' ' { + flags.set(stringNonCanonical) + } else { + // \uFFFF notation must be lower case. + for _, c := range b[n+2 : n+6] { + if 'A' <= c && c <= 'F' { + flags.set(stringNonCanonical) + } + } + } + } + n += 6 + + if validateUTF8 && utf16.IsSurrogate(rune(v1)) { + if uint(len(b)) >= uint(n+2) && (b[n] != '\\' || b[n+1] != 'u') { + return n, &SyntacticError{str: "invalid unpaired surrogate half within string"} + } + if uint(len(b)) < uint(n+6) { + if !hasEscapeSequencePrefix(b[n:]) { + flags.set(stringNonCanonical) + return n, &SyntacticError{str: "invalid escape sequence " + strconv.Quote(string(b[n:])) + " within string"} + } + return resumeOffset, io.ErrUnexpectedEOF + } + v2, ok := parseHexUint16(b[n+2 : n+6]) + if !ok { + return n, &SyntacticError{str: "invalid escape sequence " + strconv.Quote(string(b[n:n+6])) + " within string"} + } + if utf16.DecodeRune(rune(v1), rune(v2)) == utf8.RuneError { + return n, &SyntacticError{str: "invalid surrogate pair in string"} + } + n += 6 + } + default: + flags.set(stringNonCanonical) + return n, &SyntacticError{str: "invalid escape sequence " + strconv.Quote(string(b[n:n+2])) + " within string"} + } + // Handle invalid UTF-8. + case r == utf8.RuneError: + if !utf8.FullRune(b[n:]) { + return n, io.ErrUnexpectedEOF + } + flags.set(stringNonVerbatim | stringNonCanonical) + if validateUTF8 { + return n, &SyntacticError{str: "invalid UTF-8 within string"} + } + n++ + // Handle invalid control characters. 
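+		// (For example, a raw line feed inside a string is rejected here;
+		// it must be written as the escape sequence \n per RFC 8259.)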
+		case r < ' ':
+			flags.set(stringNonVerbatim | stringNonCanonical)
+			return n, newInvalidCharacterError(b[n:], "within string (expecting non-control character)")
+		default:
+			panic("BUG: unhandled character " + quoteRune(b[n:]))
+		}
+	}
+	return n, io.ErrUnexpectedEOF
+}
+
+// hasEscapeSequencePrefix reports whether b is possibly
+// the truncated prefix of a \uFFFF escape sequence.
+func hasEscapeSequencePrefix(b []byte) bool {
+	for i, c := range b {
+		switch {
+		case i == 0 && c != '\\':
+			return false
+		case i == 1 && c != 'u':
+			return false
+		case i >= 2 && i < 6 && !('0' <= c && c <= '9') && !('a' <= c && c <= 'f') && !('A' <= c && c <= 'F'):
+			return false
+		}
+	}
+	return true
+}
+
+// unescapeString appends the unescaped form of a JSON string in src to dst.
+// Any invalid UTF-8 within the string will be replaced with utf8.RuneError.
+// The input must be an entire JSON string with no surrounding whitespace.
+func unescapeString(dst, src []byte) (v []byte, ok bool) {
+	// Consume leading double quote.
+	if uint(len(src)) == 0 || src[0] != '"' {
+		return dst, false
+	}
+	i, n := 1, 1
+
+	// Consume every character until completion.
+	for uint(len(src)) > uint(n) {
+		// Optimize for long sequences of unescaped characters.
+		noEscape := func(c byte) bool {
+			return c < utf8.RuneSelf && ' ' <= c && c != '\\' && c != '"'
+		}
+		for uint(len(src)) > uint(n) && noEscape(src[n]) {
+			n++
+		}
+		if uint(len(src)) <= uint(n) {
+			break
+		}
+
+		// Check for terminating double quote.
+		if src[n] == '"' {
+			dst = append(dst, src[i:n]...)
+			n++
+			return dst, len(src) == n
+		}
+
+		switch r, rn := utf8.DecodeRune(src[n:]); {
+		// Handle UTF-8 encoded byte sequence.
+		// Due to specialized handling of ASCII above, we know that
+		// all normal sequences at this point must be 2 bytes or larger.
+		case rn > 1:
+			n += rn
+		// Handle escape sequence.
+		case r == '\\':
+			dst = append(dst, src[i:n]...)
+
+			// Handle escape sequence.
+			if uint(len(src)) < uint(n+2) {
+				return dst, false // truncated escape sequence
+			}
+			switch r := src[n+1]; r {
+			case '"', '\\', '/':
+				dst = append(dst, r)
+				n += 2
+			case 'b':
+				dst = append(dst, '\b')
+				n += 2
+			case 'f':
+				dst = append(dst, '\f')
+				n += 2
+			case 'n':
+				dst = append(dst, '\n')
+				n += 2
+			case 'r':
+				dst = append(dst, '\r')
+				n += 2
+			case 't':
+				dst = append(dst, '\t')
+				n += 2
+			case 'u':
+				if uint(len(src)) < uint(n+6) {
+					return dst, false // truncated escape sequence
+				}
+				v1, ok := parseHexUint16(src[n+2 : n+6])
+				if !ok {
+					return dst, false // invalid escape sequence
+				}
+				n += 6
+
+				// Check whether this is a surrogate half.
+				r := rune(v1)
+				if utf16.IsSurrogate(r) {
+					r = utf8.RuneError // assume failure unless the following succeeds
+					if uint(len(src)) >= uint(n+6) && src[n+0] == '\\' && src[n+1] == 'u' {
+						if v2, ok := parseHexUint16(src[n+2 : n+6]); ok {
+							if r = utf16.DecodeRune(rune(v1), rune(v2)); r != utf8.RuneError {
+								n += 6
+							}
+						}
+					}
+				}
+
+				dst = utf8.AppendRune(dst, r)
+			default:
+				return dst, false // invalid escape sequence
+			}
+			i = n
+		// Handle invalid UTF-8.
+		case r == utf8.RuneError:
+			// NOTE: An unescaped string may be longer than the escaped string
+			// because invalid UTF-8 bytes are being replaced.
+			dst = append(dst, src[i:n]...)
+			dst = append(dst, "\uFFFD"...)
+			n += rn
+			i = n
+		// Handle invalid control characters.
+		case r < ' ':
+			dst = append(dst, src[i:n]...)
+ return dst, false // invalid control character or unescaped quote + default: + panic("BUG: unhandled character " + quoteRune(src[n:])) + } + } + dst = append(dst, src[i:n]...) + return dst, false // truncated input +} + +// unescapeStringMayCopy returns the unescaped form of b. +// If there are no escaped characters, the output is simply a subslice of +// the input with the surrounding quotes removed. +// Otherwise, a new buffer is allocated for the output. +func unescapeStringMayCopy(b []byte, isVerbatim bool) []byte { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + if isVerbatim { + return b[len(`"`) : len(b)-len(`"`)] + } + b, _ = unescapeString(make([]byte, 0, len(b)), b) + return b +} + +// consumeSimpleNumber consumes the next JSON number per RFC 7159, section 6 +// but is limited to the grammar for a positive integer. +// It returns 0 if it is invalid or more complicated than a simple integer, +// in which case consumeNumber should be called. +func consumeSimpleNumber(b []byte) (n int) { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + if len(b) > 0 { + if b[0] == '0' { + n++ + } else if '1' <= b[0] && b[0] <= '9' { + n++ + for len(b) > n && ('0' <= b[n] && b[n] <= '9') { + n++ + } + } else { + return 0 + } + if len(b) == n || !(b[n] == '.' || b[n] == 'e' || b[n] == 'E') { + return n + } + } + return 0 +} + +type consumeNumberState uint + +const ( + consumeNumberInit consumeNumberState = iota + beforeIntegerDigits + withinIntegerDigits + beforeFractionalDigits + withinFractionalDigits + beforeExponentDigits + withinExponentDigits +) + +// consumeNumber consumes the next JSON number per RFC 7159, section 6. +// It reports the number of bytes consumed and whether an error was encountered. +// If the input appears truncated, it returns io.ErrUnexpectedEOF. +// +// Note that JSON numbers are not self-terminating. +// If the entire input is consumed, then the caller needs to consider whether +// there may be subsequent unread data that may still be part of this number. +func consumeNumber(b []byte) (n int, err error) { + n, _, err = consumeNumberResumable(b, 0, consumeNumberInit) + return n, err +} + +// consumeNumberResumable is identical to consumeNumber but supports resuming +// from a previous call that returned io.ErrUnexpectedEOF. +func consumeNumberResumable(b []byte, resumeOffset int, state consumeNumberState) (n int, _ consumeNumberState, err error) { + // Jump to the right state when resuming from a partial consumption. + n = resumeOffset + if state > consumeNumberInit { + switch state { + case withinIntegerDigits, withinFractionalDigits, withinExponentDigits: + // Consume leading digits. + for len(b) > n && ('0' <= b[n] && b[n] <= '9') { + n++ + } + if len(b) == n { + return n, state, nil // still within the same state + } + state++ // switches "withinX" to "beforeY" where Y is the state after X + } + switch state { + case beforeIntegerDigits: + goto beforeInteger + case beforeFractionalDigits: + goto beforeFractional + case beforeExponentDigits: + goto beforeExponent + default: + return n, state, nil + } + } + + // Consume required integer component (with optional minus sign). 
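+// (Illustrative walkthrough: for `-12.5e+3`, the labeled sections below
+// consume `-12`, then `.5`, then `e+3`, recording a resumable state so a
+// truncated buffer can be refilled and parsing continued mid-number.)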
+beforeInteger: + resumeOffset = n + if len(b) > 0 && b[0] == '-' { + n++ + } + switch { + case len(b) == n: + return resumeOffset, beforeIntegerDigits, io.ErrUnexpectedEOF + case b[n] == '0': + n++ + state = beforeFractionalDigits + case '1' <= b[n] && b[n] <= '9': + n++ + for len(b) > n && ('0' <= b[n] && b[n] <= '9') { + n++ + } + state = withinIntegerDigits + default: + return n, state, newInvalidCharacterError(b[n:], "within number (expecting digit)") + } + + // Consume optional fractional component. +beforeFractional: + if len(b) > n && b[n] == '.' { + resumeOffset = n + n++ + switch { + case len(b) == n: + return resumeOffset, beforeFractionalDigits, io.ErrUnexpectedEOF + case '0' <= b[n] && b[n] <= '9': + n++ + default: + return n, state, newInvalidCharacterError(b[n:], "within number (expecting digit)") + } + for len(b) > n && ('0' <= b[n] && b[n] <= '9') { + n++ + } + state = withinFractionalDigits + } + + // Consume optional exponent component. +beforeExponent: + if len(b) > n && (b[n] == 'e' || b[n] == 'E') { + resumeOffset = n + n++ + if len(b) > n && (b[n] == '-' || b[n] == '+') { + n++ + } + switch { + case len(b) == n: + return resumeOffset, beforeExponentDigits, io.ErrUnexpectedEOF + case '0' <= b[n] && b[n] <= '9': + n++ + default: + return n, state, newInvalidCharacterError(b[n:], "within number (expecting digit)") + } + for len(b) > n && ('0' <= b[n] && b[n] <= '9') { + n++ + } + state = withinExponentDigits + } + + return n, state, nil +} + +// parseHexUint16 is similar to strconv.ParseUint, +// but operates directly on []byte and is optimized for base-16. +// See https://go.dev/issue/42429. +func parseHexUint16(b []byte) (v uint16, ok bool) { + if len(b) != 4 { + return 0, false + } + for _, c := range b[:4] { + switch { + case '0' <= c && c <= '9': + c = c - '0' + case 'a' <= c && c <= 'f': + c = 10 + c - 'a' + case 'A' <= c && c <= 'F': + c = 10 + c - 'A' + default: + return 0, false + } + v = v*16 + uint16(c) + } + return v, true +} + +// parseDecUint is similar to strconv.ParseUint, +// but operates directly on []byte and is optimized for base-10. +// If the number is syntactically valid but overflows uint64, +// then it returns (math.MaxUint64, false). +// See https://go.dev/issue/42429. +func parseDecUint(b []byte) (v uint64, ok bool) { + // Overflow logic is based on strconv/atoi.go:138-149 from Go1.15, where: + // - cutoff is equal to math.MaxUint64/10+1, and + // - the n1 > maxVal check is unnecessary + // since maxVal is equivalent to math.MaxUint64. + var n int + var overflow bool + for len(b) > n && ('0' <= b[n] && b[n] <= '9') { + overflow = overflow || v >= math.MaxUint64/10+1 + v *= 10 + + v1 := v + uint64(b[n]-'0') + overflow = overflow || v1 < v + v = v1 + + n++ + } + if n == 0 || len(b) != n { + return 0, false + } + if overflow { + return math.MaxUint64, false + } + return v, true +} + +// parseFloat parses a floating point number according to the Go float grammar. +// Note that the JSON number grammar is a strict subset. +// +// If the number overflows the finite representation of a float, +// then we return MaxFloat since any finite value will always be infinitely +// more accurate at representing another finite value than an infinite value. +func parseFloat(b []byte, bits int) (v float64, ok bool) { + // Fast path for exact integer numbers which fit in the + // 24-bit or 53-bit significand of a float32 or float64. 
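+	// (For example, parsing `9007199254740992`, which is 1<<53, stays on
+	// this fast path for 64-bit floats and avoids the allocating
+	// strconv.ParseFloat call below.)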
+ var negLen int // either 0 or 1 + if len(b) > 0 && b[0] == '-' { + negLen = 1 + } + u, ok := parseDecUint(b[negLen:]) + if ok && ((bits == 32 && u <= 1<<24) || (bits == 64 && u <= 1<<53)) { + return math.Copysign(float64(u), float64(-1*negLen)), true + } + + // Note that the []byte->string conversion unfortunately allocates. + // See https://go.dev/issue/42429 for more information. + fv, err := strconv.ParseFloat(string(b), bits) + if math.IsInf(fv, 0) { + switch { + case bits == 32 && math.IsInf(fv, +1): + return +math.MaxFloat32, true + case bits == 64 && math.IsInf(fv, +1): + return +math.MaxFloat64, true + case bits == 32 && math.IsInf(fv, -1): + return -math.MaxFloat32, true + case bits == 64 && math.IsInf(fv, -1): + return -math.MaxFloat64, true + } + } + return fv, err == nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go new file mode 100644 index 000000000..e4eefa3de --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go @@ -0,0 +1,182 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements serialization of JSON +// as specified in RFC 4627, RFC 7159, RFC 7493, RFC 8259, and RFC 8785. +// JSON is a simple data interchange format that can represent +// primitive data types such as booleans, strings, and numbers, +// in addition to structured data types such as objects and arrays. +// +// # Terminology +// +// This package uses the terms "encode" and "decode" for syntactic functionality +// that is concerned with processing JSON based on its grammar, and +// uses the terms "marshal" and "unmarshal" for semantic functionality +// that determines the meaning of JSON values as Go values and vice-versa. +// It aims to provide a clear distinction between functionality that +// is purely concerned with encoding versus that of marshaling. +// For example, one can directly encode a stream of JSON tokens without +// needing to marshal a concrete Go value representing them. +// Similarly, one can decode a stream of JSON tokens without +// needing to unmarshal them into a concrete Go value. +// +// This package uses JSON terminology when discussing JSON, which may differ +// from related concepts in Go or elsewhere in computing literature. +// +// - A JSON "object" refers to an unordered collection of name/value members. +// - A JSON "array" refers to an ordered sequence of elements. +// - A JSON "value" refers to either a literal (i.e., null, false, or true), +// string, number, object, or array. +// +// See RFC 8259 for more information. +// +// # Specifications +// +// Relevant specifications include RFC 4627, RFC 7159, RFC 7493, RFC 8259, +// and RFC 8785. Each RFC is generally a stricter subset of another RFC. +// In increasing order of strictness: +// +// - RFC 4627 and RFC 7159 do not require (but recommend) the use of UTF-8 +// and also do not require (but recommend) that object names be unique. +// - RFC 8259 requires the use of UTF-8, +// but does not require (but recommends) that object names be unique. +// - RFC 7493 requires the use of UTF-8 +// and also requires that object names be unique. +// - RFC 8785 defines a canonical representation. It requires the use of UTF-8 +// and also requires that object names be unique and in a specific ordering. 
+// It specifies exactly how strings and numbers must be formatted. +// +// The primary difference between RFC 4627 and RFC 7159 is that the former +// restricted top-level values to only JSON objects and arrays, while +// RFC 7159 and subsequent RFCs permit top-level values to additionally be +// JSON nulls, booleans, strings, or numbers. +// +// By default, this package operates on RFC 7493, but can be configured +// to operate according to the other RFC specifications. +// RFC 7493 is a stricter subset of RFC 8259 and fully compliant with it. +// In particular, it makes specific choices about behavior that RFC 8259 +// leaves as undefined in order to ensure greater interoperability. +// +// # JSON Representation of Go structs +// +// A Go struct is naturally represented as a JSON object, +// where each Go struct field corresponds with a JSON object member. +// When marshaling, all Go struct fields are recursively encoded in depth-first +// order as JSON object members except those that are ignored or omitted. +// When unmarshaling, JSON object members are recursively decoded +// into the corresponding Go struct fields. +// Object members that do not match any struct fields, +// also known as “unknown members”, are ignored by default or rejected +// if UnmarshalOptions.RejectUnknownMembers is specified. +// +// The representation of each struct field can be customized in the +// "json" struct field tag, where the tag is a comma separated list of options. +// As a special case, if the entire tag is `json:"-"`, +// then the field is ignored with regard to its JSON representation. +// +// The first option is the JSON object name override for the Go struct field. +// If the name is not specified, then the Go struct field name +// is used as the JSON object name. JSON names containing commas or quotes, +// or names identical to "" or "-", can be specified using +// a single-quoted string literal, where the syntax is identical to +// the Go grammar for a double-quoted string literal, +// but instead uses single quotes as the delimiters. +// By default, unmarshaling uses case-sensitive matching to identify +// the Go struct field associated with a JSON object name. +// +// After the name, the following tag options are supported: +// +// - omitzero: When marshaling, the "omitzero" option specifies that +// the struct field should be omitted if the field value is zero +// as determined by the "IsZero() bool" method if present, +// otherwise based on whether the field is the zero Go value. +// This option has no effect when unmarshaling. +// +// - omitempty: When marshaling, the "omitempty" option specifies that +// the struct field should be omitted if the field value would have been +// encoded as a JSON null, empty string, empty object, or empty array. +// This option has no effect when unmarshaling. +// +// - string: The "string" option specifies that +// MarshalOptions.StringifyNumbers and UnmarshalOptions.StringifyNumbers +// be set when marshaling or unmarshaling a struct field value. +// This causes numeric types to be encoded as a JSON number +// within a JSON string, and to be decoded from either a JSON number or +// a JSON string containing a JSON number. +// This extra level of encoding is often necessary since +// many JSON parsers cannot precisely represent 64-bit integers. 
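+// For example (an illustrative sketch), a struct field declared as
+//
+//	ID int64 `json:"id,string"`
+//
+// would marshal as {"id":"123"} rather than {"id":123}.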
+// +// - nocase: When unmarshaling, the "nocase" option specifies that +// if the JSON object name does not exactly match the JSON name +// for any of the struct fields, then it attempts to match the struct field +// using a case-insensitive match that also ignores dashes and underscores. +// If multiple fields match, the first declared field in breadth-first order +// takes precedence. This option has no effect when marshaling. +// +// - inline: The "inline" option specifies that +// the JSON representable content of this field type is to be promoted +// as if they were specified in the parent struct. +// It is the JSON equivalent of Go struct embedding. +// A Go embedded field is implicitly inlined unless an explicit JSON name +// is specified. The inlined field must be a Go struct +// (that does not implement any JSON methods), RawValue, map[string]T, +// or an unnamed pointer to such types. When marshaling, +// inlined fields from a pointer type are omitted if it is nil. +// Inlined fields of type RawValue and map[string]T are called +// “inlined fallbacks” as they can represent all possible +// JSON object members not directly handled by the parent struct. +// Only one inlined fallback field may be specified in a struct, +// while many non-fallback fields may be specified. This option +// must not be specified with any other option (including the JSON name). +// +// - unknown: The "unknown" option is a specialized variant +// of the inlined fallback to indicate that this Go struct field +// contains any number of unknown JSON object members. The field type +// must be a RawValue, map[string]T, or an unnamed pointer to such types. +// If MarshalOptions.DiscardUnknownMembers is specified when marshaling, +// the contents of this field are ignored. +// If UnmarshalOptions.RejectUnknownMembers is specified when unmarshaling, +// any unknown object members are rejected regardless of whether +// an inlined fallback with the "unknown" option exists. This option +// must not be specified with any other option (including the JSON name). +// +// - format: The "format" option specifies a format flag +// used to specialize the formatting of the field value. +// The option is a key-value pair specified as "format:value" where +// the value must be either a literal consisting of letters and numbers +// (e.g., "format:RFC3339") or a single-quoted string literal +// (e.g., "format:'2006-01-02'"). The interpretation of the format flag +// is determined by the struct field type. +// +// The "omitzero" and "omitempty" options are mostly semantically identical. +// The former is defined in terms of the Go type system, +// while the latter in terms of the JSON type system. +// Consequently they behave differently in some circumstances. +// For example, only a nil slice or map is omitted under "omitzero", while +// an empty slice or map is omitted under "omitempty" regardless of nilness. +// The "omitzero" option is useful for types with a well-defined zero value +// (e.g., netip.Addr) or have an IsZero method (e.g., time.Time). +// +// Every Go struct corresponds to a list of JSON representable fields +// which is constructed by performing a breadth-first search over +// all struct fields (excluding unexported or ignored fields), +// where the search recursively descends into inlined structs. +// The set of non-inlined fields in a struct must have unique JSON names. 
+// If multiple fields all have the same JSON name, then the one +// at shallowest depth takes precedence and the other fields at deeper depths +// are excluded from the list of JSON representable fields. +// If multiple fields at the shallowest depth have the same JSON name, +// then all of those fields are excluded from the list. This is analogous to +// Go visibility rules for struct field selection with embedded struct types. +// +// Marshaling or unmarshaling a non-empty struct +// without any JSON representable fields results in a SemanticError. +// Unexported fields must not have any `json` tags except for `json:"-"`. +package json + +// requireKeyedLiterals can be embedded in a struct to require keyed literals. +type requireKeyedLiterals struct{} + +// nonComparable can be embedded in a struct to prevent comparability. +type nonComparable [0]func() diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go new file mode 100644 index 000000000..5b81ca15a --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go @@ -0,0 +1,1170 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "io" + "math" + "math/bits" + "strconv" + "unicode/utf16" + "unicode/utf8" +) + +// EncodeOptions configures how JSON encoding operates. +// The zero value is equivalent to the default settings, +// which is compliant with both RFC 7493 and RFC 8259. +type EncodeOptions struct { + requireKeyedLiterals + nonComparable + + // multiline specifies whether the encoder should emit multiline output. + multiline bool + + // omitTopLevelNewline specifies whether to omit the newline + // that is appended after every top-level JSON value when streaming. + omitTopLevelNewline bool + + // AllowDuplicateNames specifies that JSON objects may contain + // duplicate member names. Disabling the duplicate name check may provide + // performance benefits, but breaks compliance with RFC 7493, section 2.3. + // The output will still be compliant with RFC 8259, + // which leaves the handling of duplicate names as unspecified behavior. + AllowDuplicateNames bool + + // AllowInvalidUTF8 specifies that JSON strings may contain invalid UTF-8, + // which will be mangled as the Unicode replacement character, U+FFFD. + // This causes the encoder to break compliance with + // RFC 7493, section 2.1, and RFC 8259, section 8.1. + AllowInvalidUTF8 bool + + // preserveRawStrings specifies that WriteToken and WriteValue should not + // reformat any JSON string, but keep the formatting verbatim. + preserveRawStrings bool + + // canonicalizeNumbers specifies that WriteToken and WriteValue should + // reformat any JSON numbers according to RFC 8785, section 3.2.2.3. + canonicalizeNumbers bool + + // EscapeRune reports whether the provided character should be escaped + // as a hexadecimal Unicode codepoint (e.g., \ufffd). + // If nil, the shortest and simplest encoding will be used, + // which is also the formatting specified by RFC 8785, section 3.2.2.2. 
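+ //
+ // For illustration, a sketch that escapes every non-ASCII rune:
+ //
+ //	EscapeRune: func(r rune) bool { return r > unicode.MaxASCII },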
+ EscapeRune func(rune) bool
+
+ // Indent (if non-empty) specifies that the encoder should emit multiline
+ // output where each element in a JSON object or array begins on a new,
+ // indented line beginning with the indent prefix followed by one or more
+ // copies of indent according to the indentation nesting.
+ // It may only be composed of space or tab characters.
+ Indent string
+
+ // IndentPrefix is prepended to each line within a JSON object or array.
+ // The purpose of the indent prefix is to encode data that can more easily
+ // be embedded inside other formatted JSON data.
+ // It may only be composed of space or tab characters.
+ // It is ignored if Indent is empty.
+ IndentPrefix string
+}
+
+// Encoder is a streaming encoder from raw JSON tokens and values.
+// It is used to write a stream of top-level JSON values,
+// each terminated with a newline character.
+//
+// WriteToken and WriteValue calls may be interleaved.
+// For example, the following JSON value:
+//
+// {"name":"value","array":[null,false,true,3.14159],"object":{"k":"v"}}
+//
+// can be composed with the following calls (ignoring errors for brevity):
+//
+// e.WriteToken(ObjectStart) // {
+// e.WriteToken(String("name")) // "name"
+// e.WriteToken(String("value")) // "value"
+// e.WriteValue(RawValue(`"array"`)) // "array"
+// e.WriteToken(ArrayStart) // [
+// e.WriteToken(Null) // null
+// e.WriteToken(False) // false
+// e.WriteValue(RawValue("true")) // true
+// e.WriteToken(Float(3.14159)) // 3.14159
+// e.WriteToken(ArrayEnd) // ]
+// e.WriteValue(RawValue(`"object"`)) // "object"
+// e.WriteValue(RawValue(`{"k":"v"}`)) // {"k":"v"}
+// e.WriteToken(ObjectEnd) // }
+//
+// The above is one of many possible sequences of calls and
+// may not represent the most sensible method to call for any given token/value.
+// For example, it is probably more common to call WriteToken with a string
+// for object names.
+type Encoder struct {
+ state
+ encodeBuffer
+ options EncodeOptions
+
+ seenPointers seenPointers // only used when marshaling
+}
+
+// encodeBuffer is a buffer split into 2 segments:
+//
+// - buf[0:len(buf)] // written (but unflushed) portion of the buffer
+// - buf[len(buf):cap(buf)] // unused portion of the buffer
+type encodeBuffer struct {
+ buf []byte // may alias wr if it is a bytes.Buffer
+
+ // baseOffset is added to len(buf) to obtain the absolute offset
+ // relative to the start of the io.Writer stream.
+ baseOffset int64
+
+ wr io.Writer
+
+ // maxValue is the approximate maximum RawValue size passed to WriteValue.
+ maxValue int
+ // unusedCache is the buffer returned by the UnusedBuffer method.
+ unusedCache []byte
+ // bufStats is statistics about buffer utilization.
+ // It is only used with pooled encoders in pools.go.
+ bufStats bufferStatistics
+}
+
+// NewEncoder constructs a new streaming encoder writing to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return EncodeOptions{}.NewEncoder(w)
+}
+
+// NewEncoder constructs a new streaming encoder writing to w
+// configured with the provided options.
+// It flushes the internal buffer when the buffer is sufficiently full or
+// when a top-level value has been written.
+//
+// If w is a bytes.Buffer, then the encoder appends directly into the buffer
+// without copying the contents from an intermediate buffer.
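+//
+// For illustration, a minimal usage sketch (writing to os.Stdout is assumed):
+//
+//	e := EncodeOptions{Indent: "\t"}.NewEncoder(os.Stdout)
+//	if err := e.WriteValue(RawValue(`{"hello":"world"}`)); err != nil {
+//		// handle error
+//	}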
+func (o EncodeOptions) NewEncoder(w io.Writer) *Encoder { + e := new(Encoder) + o.ResetEncoder(e, w) + return e +} + +// ResetEncoder resets an encoder such that it is writing afresh to w and +// configured with the provided options. +func (o EncodeOptions) ResetEncoder(e *Encoder, w io.Writer) { + if e == nil { + panic("json: invalid nil Encoder") + } + if w == nil { + panic("json: invalid nil io.Writer") + } + e.reset(nil, w, o) +} + +func (e *Encoder) reset(b []byte, w io.Writer, o EncodeOptions) { + if len(o.Indent) > 0 { + o.multiline = true + if s := trimLeftSpaceTab(o.IndentPrefix); len(s) > 0 { + panic("json: invalid character " + quoteRune([]byte(s)) + " in indent prefix") + } + if s := trimLeftSpaceTab(o.Indent); len(s) > 0 { + panic("json: invalid character " + quoteRune([]byte(s)) + " in indent") + } + } + e.state.reset() + e.encodeBuffer = encodeBuffer{buf: b, wr: w, bufStats: e.bufStats} + e.options = o + if bb, ok := w.(*bytes.Buffer); ok && bb != nil { + e.buf = bb.Bytes()[bb.Len():] // alias the unused buffer of bb + } +} + +// Reset resets an encoder such that it is writing afresh to w but +// keeps any pre-existing encoder options. +func (e *Encoder) Reset(w io.Writer) { + e.options.ResetEncoder(e, w) +} + +// needFlush determines whether to flush at this point. +func (e *Encoder) needFlush() bool { + // NOTE: This function is carefully written to be inlineable. + + // Avoid flushing if e.wr is nil since there is no underlying writer. + // Flush if less than 25% of the capacity remains. + // Flushing at some constant fraction ensures that the buffer stops growing + // so long as the largest Token or Value fits within that unused capacity. + return e.wr != nil && (e.tokens.depth() == 1 || len(e.buf) > 3*cap(e.buf)/4) +} + +// flush flushes the buffer to the underlying io.Writer. +// It may append a trailing newline after the top-level value. +func (e *Encoder) flush() error { + if e.wr == nil || e.avoidFlush() { + return nil + } + + // In streaming mode, always emit a newline after the top-level value. + if e.tokens.depth() == 1 && !e.options.omitTopLevelNewline { + e.buf = append(e.buf, '\n') + } + + // Inform objectNameStack that we are about to flush the buffer content. + e.names.copyQuotedBuffer(e.buf) + + // Specialize bytes.Buffer for better performance. + if bb, ok := e.wr.(*bytes.Buffer); ok { + // If e.buf already aliases the internal buffer of bb, + // then the Write call simply increments the internal offset, + // otherwise Write operates as expected. + // See https://go.dev/issue/42986. + n, _ := bb.Write(e.buf) // never fails unless bb is nil + e.baseOffset += int64(n) + + // If the internal buffer of bytes.Buffer is too small, + // append operations elsewhere in the Encoder may grow the buffer. + // This would be semantically correct, but hurts performance. + // As such, ensure 25% of the current length is always available + // to reduce the probability that other appends must allocate. + if avail := bb.Cap() - bb.Len(); avail < bb.Len()/4 { + bb.Grow(avail + 1) + } + + e.buf = bb.Bytes()[bb.Len():] // alias the unused buffer of bb + return nil + } + + // Flush the internal buffer to the underlying io.Writer. + n, err := e.wr.Write(e.buf) + e.baseOffset += int64(n) + if err != nil { + // In the event of an error, preserve the unflushed portion. + // Thus, write errors aren't fatal so long as the io.Writer + // maintains consistent state after errors. 
+ if n > 0 { + e.buf = e.buf[:copy(e.buf, e.buf[n:])] + } + return &ioError{action: "write", err: err} + } + e.buf = e.buf[:0] + + // Check whether to grow the buffer. + // Note that cap(e.buf) may already exceed maxBufferSize since + // an append elsewhere already grew it to store a large token. + const maxBufferSize = 4 << 10 + const growthSizeFactor = 2 // higher value is faster + const growthRateFactor = 2 // higher value is slower + // By default, grow if below the maximum buffer size. + grow := cap(e.buf) <= maxBufferSize/growthSizeFactor + // Growing can be expensive, so only grow + // if a sufficient number of bytes have been processed. + grow = grow && int64(cap(e.buf)) < e.previousOffsetEnd()/growthRateFactor + if grow { + e.buf = make([]byte, 0, cap(e.buf)*growthSizeFactor) + } + + return nil +} + +func (e *encodeBuffer) previousOffsetEnd() int64 { return e.baseOffset + int64(len(e.buf)) } +func (e *encodeBuffer) unflushedBuffer() []byte { return e.buf } + +// avoidFlush indicates whether to avoid flushing to ensure there is always +// enough in the buffer to unwrite the last object member if it were empty. +func (e *Encoder) avoidFlush() bool { + switch { + case e.tokens.last.length() == 0: + // Never flush after ObjectStart or ArrayStart since we don't know yet + // if the object or array will end up being empty. + return true + case e.tokens.last.needObjectValue(): + // Never flush before the object value since we don't know yet + // if the object value will end up being empty. + return true + case e.tokens.last.needObjectName() && len(e.buf) >= 2: + // Never flush after the object value if it does turn out to be empty. + switch string(e.buf[len(e.buf)-2:]) { + case `ll`, `""`, `{}`, `[]`: // last two bytes of every empty value + return true + } + } + return false +} + +// unwriteEmptyObjectMember unwrites the last object member if it is empty +// and reports whether it performed an unwrite operation. +func (e *Encoder) unwriteEmptyObjectMember(prevName *string) bool { + if last := e.tokens.last; !last.isObject() || !last.needObjectName() || last.length() == 0 { + panic("BUG: must be called on an object after writing a value") + } + + // The flushing logic is modified to never flush a trailing empty value. + // The encoder never writes trailing whitespace eagerly. + b := e.unflushedBuffer() + + // Detect whether the last value was empty. + var n int + if len(b) >= 3 { + switch string(b[len(b)-2:]) { + case "ll": // last two bytes of `null` + n = len(`null`) + case `""`: + // It is possible for a non-empty string to have `""` as a suffix + // if the second to the last quote was escaped. + if b[len(b)-3] == '\\' { + return false // e.g., `"\""` is not empty + } + n = len(`""`) + case `{}`: + n = len(`{}`) + case `[]`: + n = len(`[]`) + } + } + if n == 0 { + return false + } + + // Unwrite the value, whitespace, colon, name, whitespace, and comma. + b = b[:len(b)-n] + b = trimSuffixWhitespace(b) + b = trimSuffixByte(b, ':') + b = trimSuffixString(b) + b = trimSuffixWhitespace(b) + b = trimSuffixByte(b, ',') + e.buf = b // store back truncated unflushed buffer + + // Undo state changes. 
+ e.tokens.last.decrement() // for object member value + e.tokens.last.decrement() // for object member name + if !e.options.AllowDuplicateNames { + if e.tokens.last.isActiveNamespace() { + e.namespaces.last().removeLast() + } + e.names.clearLast() + if prevName != nil { + e.names.copyQuotedBuffer(e.buf) // required by objectNameStack.replaceLastUnquotedName + e.names.replaceLastUnquotedName(*prevName) + } + } + return true +} + +// unwriteOnlyObjectMemberName unwrites the only object member name +// and returns the unquoted name. +func (e *Encoder) unwriteOnlyObjectMemberName() string { + if last := e.tokens.last; !last.isObject() || last.length() != 1 { + panic("BUG: must be called on an object after writing first name") + } + + // Unwrite the name and whitespace. + b := trimSuffixString(e.buf) + isVerbatim := bytes.IndexByte(e.buf[len(b):], '\\') < 0 + name := string(unescapeStringMayCopy(e.buf[len(b):], isVerbatim)) + e.buf = trimSuffixWhitespace(b) + + // Undo state changes. + e.tokens.last.decrement() + if !e.options.AllowDuplicateNames { + if e.tokens.last.isActiveNamespace() { + e.namespaces.last().removeLast() + } + e.names.clearLast() + } + return name +} + +func trimSuffixWhitespace(b []byte) []byte { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + n := len(b) - 1 + for n >= 0 && (b[n] == ' ' || b[n] == '\t' || b[n] == '\r' || b[n] == '\n') { + n-- + } + return b[:n+1] +} + +func trimSuffixString(b []byte) []byte { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + if len(b) > 0 && b[len(b)-1] == '"' { + b = b[:len(b)-1] + } + for len(b) >= 2 && !(b[len(b)-1] == '"' && b[len(b)-2] != '\\') { + b = b[:len(b)-1] // trim all characters except an unescaped quote + } + if len(b) > 0 && b[len(b)-1] == '"' { + b = b[:len(b)-1] + } + return b +} + +func hasSuffixByte(b []byte, c byte) bool { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + return len(b) > 0 && b[len(b)-1] == c +} + +func trimSuffixByte(b []byte, c byte) []byte { + // NOTE: The arguments and logic are kept simple to keep this inlineable. + if len(b) > 0 && b[len(b)-1] == c { + return b[:len(b)-1] + } + return b +} + +// WriteToken writes the next token and advances the internal write offset. +// +// The provided token kind must be consistent with the JSON grammar. +// For example, it is an error to provide a number when the encoder +// is expecting an object name (which is always a string), or +// to provide an end object delimiter when the encoder is finishing an array. +// If the provided token is invalid, then it reports a SyntacticError and +// the internal state remains unchanged. +func (e *Encoder) WriteToken(t Token) error { + k := t.Kind() + b := e.buf // use local variable to avoid mutating e in case of error + + // Append any delimiters or optional whitespace. + b = e.tokens.mayAppendDelim(b, k) + if e.options.multiline { + b = e.appendWhitespace(b, k) + } + + // Append the token to the output and to the state machine. + var err error + switch k { + case 'n': + b = append(b, "null"...) + err = e.tokens.appendLiteral() + case 'f': + b = append(b, "false"...) + err = e.tokens.appendLiteral() + case 't': + b = append(b, "true"...) 
+ err = e.tokens.appendLiteral()
+ case '"':
+ n0 := len(b) // offset before calling t.appendString
+ if b, err = t.appendString(b, !e.options.AllowInvalidUTF8, e.options.preserveRawStrings, e.options.EscapeRune); err != nil {
+ break
+ }
+ if !e.options.AllowDuplicateNames && e.tokens.last.needObjectName() {
+ if !e.tokens.last.isValidNamespace() {
+ err = errInvalidNamespace
+ break
+ }
+ if e.tokens.last.isActiveNamespace() && !e.namespaces.last().insertQuoted(b[n0:], false) {
+ err = &SyntacticError{str: "duplicate name " + string(b[n0:]) + " in object"}
+ break
+ }
+ e.names.replaceLastQuotedOffset(n0) // only replace if insertQuoted succeeds
+ }
+ err = e.tokens.appendString()
+ case '0':
+ if b, err = t.appendNumber(b, e.options.canonicalizeNumbers); err != nil {
+ break
+ }
+ err = e.tokens.appendNumber()
+ case '{':
+ b = append(b, '{')
+ if err = e.tokens.pushObject(); err != nil {
+ break
+ }
+ if !e.options.AllowDuplicateNames {
+ e.names.push()
+ e.namespaces.push()
+ }
+ case '}':
+ b = append(b, '}')
+ if err = e.tokens.popObject(); err != nil {
+ break
+ }
+ if !e.options.AllowDuplicateNames {
+ e.names.pop()
+ e.namespaces.pop()
+ }
+ case '[':
+ b = append(b, '[')
+ err = e.tokens.pushArray()
+ case ']':
+ b = append(b, ']')
+ err = e.tokens.popArray()
+ default:
+ return &SyntacticError{str: "invalid json.Token"}
+ }
+ if err != nil {
+ return err
+ }
+
+ // Finish off the buffer and store it back into e.
+ e.buf = b
+ if e.needFlush() {
+ return e.flush()
+ }
+ return nil
+}
+
+const (
+ rawIntNumber = -1
+ rawUintNumber = -2
+)
+
+// writeNumber is a specialized version of WriteToken optimized for numbers.
+// As a special case, if bits is -1 or -2, it will treat v as
+// the raw-encoded bits of an int64 or uint64, respectively.
+// It is only called from arshal_default.go.
+func (e *Encoder) writeNumber(v float64, bits int, quote bool) error {
+ b := e.buf // use local variable to avoid mutating e in case of error
+
+ // Append any delimiters or optional whitespace.
+ b = e.tokens.mayAppendDelim(b, '0')
+ if e.options.multiline {
+ b = e.appendWhitespace(b, '0')
+ }
+
+ if quote {
+ // Append the value to the output.
+ n0 := len(b) // offset before appending the number
+ b = append(b, '"')
+ switch bits {
+ case rawIntNumber:
+ b = strconv.AppendInt(b, int64(math.Float64bits(v)), 10)
+ case rawUintNumber:
+ b = strconv.AppendUint(b, uint64(math.Float64bits(v)), 10)
+ default:
+ b = appendNumber(b, v, bits)
+ }
+ b = append(b, '"')
+
+ // Escape the string if necessary.
+ if e.options.EscapeRune != nil {
+ b2 := append(e.unusedCache, b[n0+len(`"`):len(b)-len(`"`)]...)
+ b, _ = appendString(b[:n0], string(b2), false, e.options.EscapeRune)
+ e.unusedCache = b2[:0]
+ }
+
+ // Update the state machine.
+ if !e.options.AllowDuplicateNames && e.tokens.last.needObjectName() {
+ if !e.tokens.last.isValidNamespace() {
+ return errInvalidNamespace
+ }
+ if e.tokens.last.isActiveNamespace() && !e.namespaces.last().insertQuoted(b[n0:], false) {
+ return &SyntacticError{str: "duplicate name " + string(b[n0:]) + " in object"}
+ }
+ e.names.replaceLastQuotedOffset(n0) // only replace if insertQuoted succeeds
+ }
+ if err := e.tokens.appendString(); err != nil {
+ return err
+ }
+ } else {
+ switch bits {
+ case rawIntNumber:
+ b = strconv.AppendInt(b, int64(math.Float64bits(v)), 10)
+ case rawUintNumber:
+ b = strconv.AppendUint(b, uint64(math.Float64bits(v)), 10)
+ default:
+ b = appendNumber(b, v, bits)
+ }
+ if err := e.tokens.appendNumber(); err != nil {
+ return err
+ }
+ }
+
+ // Finish off the buffer and store it back into e.
+ e.buf = b
+ if e.needFlush() {
+ return e.flush()
+ }
+ return nil
+}
+
+// WriteValue writes the next raw value and advances the internal write offset.
+// The Encoder does not simply copy the provided value verbatim, but
+// parses it to ensure that it is syntactically valid and reformats it
+// according to how the Encoder is configured to format whitespace and strings.
+//
+// The provided value kind must be consistent with the JSON grammar
+// (see examples on Encoder.WriteToken). If the provided value is invalid,
+// then it reports a SyntacticError and the internal state remains unchanged.
+func (e *Encoder) WriteValue(v RawValue) error {
+ e.maxValue |= len(v) // bitwise OR is a fast approximation of max
+
+ k := v.Kind()
+ b := e.buf // use local variable to avoid mutating e in case of error
+
+ // Append any delimiters or optional whitespace.
+ b = e.tokens.mayAppendDelim(b, k)
+ if e.options.multiline {
+ b = e.appendWhitespace(b, k)
+ }
+
+ // Append the value to the output.
+ var err error
+ v = v[consumeWhitespace(v):]
+ n0 := len(b) // offset before calling e.reformatValue
+ b, v, err = e.reformatValue(b, v, e.tokens.depth())
+ if err != nil {
+ return err
+ }
+ v = v[consumeWhitespace(v):]
+ if len(v) > 0 {
+ return newInvalidCharacterError(v[0:], "after top-level value")
+ }
+
+ // Append the kind to the state machine.
+ switch k {
+ case 'n', 'f', 't':
+ err = e.tokens.appendLiteral()
+ case '"':
+ if !e.options.AllowDuplicateNames && e.tokens.last.needObjectName() {
+ if !e.tokens.last.isValidNamespace() {
+ err = errInvalidNamespace
+ break
+ }
+ if e.tokens.last.isActiveNamespace() && !e.namespaces.last().insertQuoted(b[n0:], false) {
+ err = &SyntacticError{str: "duplicate name " + string(b[n0:]) + " in object"}
+ break
+ }
+ e.names.replaceLastQuotedOffset(n0) // only replace if insertQuoted succeeds
+ }
+ err = e.tokens.appendString()
+ case '0':
+ err = e.tokens.appendNumber()
+ case '{':
+ if err = e.tokens.pushObject(); err != nil {
+ break
+ }
+ if err = e.tokens.popObject(); err != nil {
+ panic("BUG: popObject should never fail immediately after pushObject: " + err.Error())
+ }
+ case '[':
+ if err = e.tokens.pushArray(); err != nil {
+ break
+ }
+ if err = e.tokens.popArray(); err != nil {
+ panic("BUG: popArray should never fail immediately after pushArray: " + err.Error())
+ }
+ }
+ if err != nil {
+ return err
+ }
+
+ // Finish off the buffer and store it back into e.
+ e.buf = b
+ if e.needFlush() {
+ return e.flush()
+ }
+ return nil
+}
+
+// appendWhitespace appends whitespace that immediately precedes the next token.
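+// When the next token is an object value, a single space follows the
+// preceding colon; otherwise the token begins on a new, indented line.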
+func (e *Encoder) appendWhitespace(b []byte, next Kind) []byte {
+ if e.tokens.needDelim(next) == ':' {
+ return append(b, ' ')
+ } else {
+ return e.appendIndent(b, e.tokens.needIndent(next))
+ }
+}
+
+// appendIndent appends the appropriate number of indentation characters
+// for the current nested level, n.
+func (e *Encoder) appendIndent(b []byte, n int) []byte {
+ if n == 0 {
+ return b
+ }
+ b = append(b, '\n')
+ b = append(b, e.options.IndentPrefix...)
+ for ; n > 1; n-- {
+ b = append(b, e.options.Indent...)
+ }
+ return b
+}
+
+// reformatValue parses a JSON value from the start of src and
+// appends it to the end of dst, reformatting whitespace and strings as needed.
+// It returns the updated versions of dst and src.
+func (e *Encoder) reformatValue(dst []byte, src RawValue, depth int) ([]byte, RawValue, error) {
+ // TODO: Should this update valueFlags as input?
+ if len(src) == 0 {
+ return dst, src, io.ErrUnexpectedEOF
+ }
+ var n int
+ var err error
+ switch k := Kind(src[0]).normalize(); k {
+ case 'n':
+ if n = consumeNull(src); n == 0 {
+ n, err = consumeLiteral(src, "null")
+ }
+ case 'f':
+ if n = consumeFalse(src); n == 0 {
+ n, err = consumeLiteral(src, "false")
+ }
+ case 't':
+ if n = consumeTrue(src); n == 0 {
+ n, err = consumeLiteral(src, "true")
+ }
+ case '"':
+ if n := consumeSimpleString(src); n > 0 && e.options.EscapeRune == nil {
+ dst, src = append(dst, src[:n]...), src[n:] // copy simple strings verbatim
+ return dst, src, nil
+ }
+ return reformatString(dst, src, !e.options.AllowInvalidUTF8, e.options.preserveRawStrings, e.options.EscapeRune)
+ case '0':
+ if n := consumeSimpleNumber(src); n > 0 && !e.options.canonicalizeNumbers {
+ dst, src = append(dst, src[:n]...), src[n:] // copy simple numbers verbatim
+ return dst, src, nil
+ }
+ return reformatNumber(dst, src, e.options.canonicalizeNumbers)
+ case '{':
+ return e.reformatObject(dst, src, depth)
+ case '[':
+ return e.reformatArray(dst, src, depth)
+ default:
+ return dst, src, newInvalidCharacterError(src, "at start of value")
+ }
+ if err != nil {
+ return dst, src, err
+ }
+ dst, src = append(dst, src[:n]...), src[n:]
+ return dst, src, nil
+}
+
+// reformatObject parses a JSON object from the start of src and
+// appends it to the end of dst, reformatting whitespace and strings as needed.
+// It returns the updated versions of dst and src.
+func (e *Encoder) reformatObject(dst []byte, src RawValue, depth int) ([]byte, RawValue, error) {
+ // Append object start.
+ if src[0] != '{' {
+ panic("BUG: reformatObject must be called with a buffer that starts with '{'")
+ }
+ dst, src = append(dst, '{'), src[1:]
+
+ // Append (possible) object end.
+ src = src[consumeWhitespace(src):]
+ if len(src) == 0 {
+ return dst, src, io.ErrUnexpectedEOF
+ }
+ if src[0] == '}' {
+ dst, src = append(dst, '}'), src[1:]
+ return dst, src, nil
+ }
+
+ var err error
+ var names *objectNamespace
+ if !e.options.AllowDuplicateNames {
+ e.namespaces.push()
+ defer e.namespaces.pop()
+ names = e.namespaces.last()
+ }
+ depth++
+ for {
+ // Append optional newline and indentation.
+ if e.options.multiline {
+ dst = e.appendIndent(dst, depth)
+ }
+
+ // Append object name.
+ src = src[consumeWhitespace(src):] + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + n0 := len(dst) // offset before calling reformatString + n := consumeSimpleString(src) + if n > 0 && e.options.EscapeRune == nil { + dst, src = append(dst, src[:n]...), src[n:] // copy simple strings verbatim + } else { + dst, src, err = reformatString(dst, src, !e.options.AllowInvalidUTF8, e.options.preserveRawStrings, e.options.EscapeRune) + } + if err != nil { + return dst, src, err + } + if !e.options.AllowDuplicateNames && !names.insertQuoted(dst[n0:], false) { + return dst, src, &SyntacticError{str: "duplicate name " + string(dst[n0:]) + " in object"} + } + + // Append colon. + src = src[consumeWhitespace(src):] + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + if src[0] != ':' { + return dst, src, newInvalidCharacterError(src, "after object name (expecting ':')") + } + dst, src = append(dst, ':'), src[1:] + if e.options.multiline { + dst = append(dst, ' ') + } + + // Append object value. + src = src[consumeWhitespace(src):] + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + dst, src, err = e.reformatValue(dst, src, depth) + if err != nil { + return dst, src, err + } + + // Append comma or object end. + src = src[consumeWhitespace(src):] + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + switch src[0] { + case ',': + dst, src = append(dst, ','), src[1:] + continue + case '}': + if e.options.multiline { + dst = e.appendIndent(dst, depth-1) + } + dst, src = append(dst, '}'), src[1:] + return dst, src, nil + default: + return dst, src, newInvalidCharacterError(src, "after object value (expecting ',' or '}')") + } + } +} + +// reformatArray parses a JSON array from the start of src and +// appends it to the end of dst, reformatting whitespace and strings as needed. +// It returns the updated versions of dst and src. +func (e *Encoder) reformatArray(dst []byte, src RawValue, depth int) ([]byte, RawValue, error) { + // Append array start. + if src[0] != '[' { + panic("BUG: reformatArray must be called with a buffer that starts with '['") + } + dst, src = append(dst, '['), src[1:] + + // Append (possible) array end. + src = src[consumeWhitespace(src):] + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + if src[0] == ']' { + dst, src = append(dst, ']'), src[1:] + return dst, src, nil + } + + var err error + depth++ + for { + // Append optional newline and indentation. + if e.options.multiline { + dst = e.appendIndent(dst, depth) + } + + // Append array value. + src = src[consumeWhitespace(src):] + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + dst, src, err = e.reformatValue(dst, src, depth) + if err != nil { + return dst, src, err + } + + // Append comma or array end. + src = src[consumeWhitespace(src):] + if len(src) == 0 { + return dst, src, io.ErrUnexpectedEOF + } + switch src[0] { + case ',': + dst, src = append(dst, ','), src[1:] + continue + case ']': + if e.options.multiline { + dst = e.appendIndent(dst, depth-1) + } + dst, src = append(dst, ']'), src[1:] + return dst, src, nil + default: + return dst, src, newInvalidCharacterError(src, "after array value (expecting ',' or ']')") + } + } +} + +// OutputOffset returns the current output byte offset. It gives the location +// of the next byte immediately after the most recently written token or value. +// The number of bytes actually written to the underlying io.Writer may be less +// than this offset due to internal buffering effects. 
+func (e *Encoder) OutputOffset() int64 {
+ return e.previousOffsetEnd()
+}
+
+// UnusedBuffer returns a zero-length buffer with a possible non-zero capacity.
+// This buffer is intended to be used to populate a RawValue
+// being passed to an immediately succeeding WriteValue call.
+//
+// Example usage:
+//
+// b := e.UnusedBuffer()
+// b = append(b, '"')
+// b = appendString(b, v) // append the string formatting of v
+// b = append(b, '"')
+// ... := e.WriteValue(b)
+//
+// It is the user's responsibility to ensure that the value is valid JSON.
+func (e *Encoder) UnusedBuffer() []byte {
+ // NOTE: We don't return e.buf[len(e.buf):cap(e.buf)] since WriteValue would
+ // need to take special care to avoid mangling the data while reformatting.
+ // WriteValue can't easily identify whether the input RawValue aliases e.buf
+ // without using unsafe.Pointer. Thus, we just return a different buffer.
+ // Should this ever alias e.buf, we need to consider how it operates with
+ // the specialized performance optimization for bytes.Buffer.
+ n := 1 << bits.Len(uint(e.maxValue|63)) // fast approximation for max length
+ if cap(e.unusedCache) < n {
+ e.unusedCache = make([]byte, 0, n)
+ }
+ return e.unusedCache
+}
+
+// StackDepth returns the depth of the state machine for written JSON data.
+// Each level on the stack represents a nested JSON object or array.
+// It is incremented whenever an ObjectStart or ArrayStart token is encountered
+// and decremented whenever an ObjectEnd or ArrayEnd token is encountered.
+// The depth is zero-indexed, where zero represents the top-level JSON value.
+func (e *Encoder) StackDepth() int {
+ // NOTE: Keep in sync with Decoder.StackDepth.
+ return e.tokens.depth() - 1
+}
+
+// StackIndex returns information about the specified stack level.
+// It must be a number between 0 and StackDepth, inclusive.
+// For each level, it reports the kind:
+//
+// - 0 for a level of zero,
+// - '{' for a level representing a JSON object, and
+// - '[' for a level representing a JSON array.
+//
+// It also reports the length of that JSON object or array.
+// Each name and value in a JSON object is counted separately,
+// so the effective number of members would be half the length.
+// A complete JSON object must have an even length.
+func (e *Encoder) StackIndex(i int) (Kind, int) {
+ // NOTE: Keep in sync with Decoder.StackIndex.
+ switch s := e.tokens.index(i); {
+ case i > 0 && s.isObject():
+ return '{', s.length()
+ case i > 0 && s.isArray():
+ return '[', s.length()
+ default:
+ return 0, s.length()
+ }
+}
+
+// StackPointer returns a JSON Pointer (RFC 6901) to the most recently written value.
+// Object names are only present if AllowDuplicateNames is false; otherwise
+// object members are represented using their index within the object.
+func (e *Encoder) StackPointer() string {
+ e.names.copyQuotedBuffer(e.buf)
+ return string(e.appendStackPointer(nil))
+}
+
+// appendString appends src to dst as a JSON string per RFC 7159, section 7.
+//
+// If validateUTF8 is specified, this rejects input that contains invalid UTF-8;
+// otherwise invalid bytes are replaced with the Unicode replacement character.
+// If escapeRune is provided, it specifies which runes to escape using
+// hexadecimal sequences. If nil, the shortest representable form is used,
+// which is also the canonical form for strings (RFC 8785, section 3.2.2.2).
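+// Escaping more runes than strictly necessary is always semantically safe,
+// since escaped and unescaped forms of a string decode to the same value.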
+// +// Note that this API allows full control over the formatting of strings +// except for whether a forward solidus '/' may be formatted as '\/' and +// the casing of hexadecimal Unicode escape sequences. +func appendString(dst []byte, src string, validateUTF8 bool, escapeRune func(rune) bool) ([]byte, error) { + appendEscapedASCII := func(dst []byte, c byte) []byte { + switch c { + case '"', '\\': + dst = append(dst, '\\', c) + case '\b': + dst = append(dst, "\\b"...) + case '\f': + dst = append(dst, "\\f"...) + case '\n': + dst = append(dst, "\\n"...) + case '\r': + dst = append(dst, "\\r"...) + case '\t': + dst = append(dst, "\\t"...) + default: + dst = append(dst, "\\u"...) + dst = appendHexUint16(dst, uint16(c)) + } + return dst + } + appendEscapedUnicode := func(dst []byte, r rune) []byte { + if r1, r2 := utf16.EncodeRune(r); r1 != '\ufffd' && r2 != '\ufffd' { + dst = append(dst, "\\u"...) + dst = appendHexUint16(dst, uint16(r1)) + dst = append(dst, "\\u"...) + dst = appendHexUint16(dst, uint16(r2)) + } else { + dst = append(dst, "\\u"...) + dst = appendHexUint16(dst, uint16(r)) + } + return dst + } + + // Optimize for when escapeRune is nil. + if escapeRune == nil { + var i, n int + dst = append(dst, '"') + for uint(len(src)) > uint(n) { + // Handle single-byte ASCII. + if c := src[n]; c < utf8.RuneSelf { + n++ + if c < ' ' || c == '"' || c == '\\' { + dst = append(dst, src[i:n-1]...) + dst = appendEscapedASCII(dst, c) + i = n + } + continue + } + + // Handle multi-byte Unicode. + _, rn := utf8.DecodeRuneInString(src[n:]) + n += rn + if rn == 1 { // must be utf8.RuneError since we already checked for single-byte ASCII + dst = append(dst, src[i:n-rn]...) + if validateUTF8 { + return dst, &SyntacticError{str: "invalid UTF-8 within string"} + } + dst = append(dst, "\ufffd"...) + i = n + } + } + dst = append(dst, src[i:n]...) + dst = append(dst, '"') + return dst, nil + } + + // Slower implementation for when escapeRune is non-nil. + var i, n int + dst = append(dst, '"') + for uint(len(src)) > uint(n) { + switch r, rn := utf8.DecodeRuneInString(src[n:]); { + case r == utf8.RuneError && rn == 1: + dst = append(dst, src[i:n]...) + if validateUTF8 { + return dst, &SyntacticError{str: "invalid UTF-8 within string"} + } + if escapeRune('\ufffd') { + dst = append(dst, `\ufffd`...) + } else { + dst = append(dst, "\ufffd"...) + } + n += rn + i = n + case escapeRune(r): + dst = append(dst, src[i:n]...) + dst = appendEscapedUnicode(dst, r) + n += rn + i = n + case r < ' ' || r == '"' || r == '\\': + dst = append(dst, src[i:n]...) + dst = appendEscapedASCII(dst, byte(r)) + n += rn + i = n + default: + n += rn + } + } + dst = append(dst, src[i:n]...) + dst = append(dst, '"') + return dst, nil +} + +// reformatString consumes a JSON string from src and appends it to dst, +// reformatting it if necessary for the given escapeRune parameter. +// It returns the appended output and the remainder of the input. +func reformatString(dst, src []byte, validateUTF8, preserveRaw bool, escapeRune func(rune) bool) ([]byte, []byte, error) { + // TODO: Should this update valueFlags as input? + var flags valueFlags + n, err := consumeString(&flags, src, validateUTF8) + if err != nil { + return dst, src[n:], err + } + if preserveRaw || (escapeRune == nil && flags.isCanonical()) { + dst = append(dst, src[:n]...) // copy the string verbatim + return dst, src[n:], nil + } + + // TODO: Implement a direct, raw-to-raw reformat for strings. 
+ // If the escapeRune option would have resulted in no changes to the output,
+ // it would be faster to simply append src to dst without going through
+ // an intermediary representation in a separate buffer.
+ b, _ := unescapeString(make([]byte, 0, n), src[:n])
+ dst, _ = appendString(dst, string(b), validateUTF8, escapeRune)
+ return dst, src[n:], nil
+}
+
+// appendNumber appends src to dst as a JSON number per RFC 7159, section 6.
+// It formats numbers similar to the ES6 number-to-string conversion.
+// See https://go.dev/issue/14135.
+//
+// The output is identical to ECMA-262, 6th edition, section 7.1.12.1 and with
+// RFC 8785, section 3.2.2.3 for 64-bit floating-point numbers except for -0,
+// which is formatted as -0 instead of just 0.
+//
+// For 32-bit floating-point numbers,
+// the output is a 32-bit equivalent of the algorithm.
+// Note that ECMA-262 specifies no algorithm for 32-bit numbers.
+func appendNumber(dst []byte, src float64, bits int) []byte {
+ if bits == 32 {
+ src = float64(float32(src))
+ }
+
+ abs := math.Abs(src)
+ fmt := byte('f')
+ if abs != 0 {
+ if bits == 64 && (float64(abs) < 1e-6 || float64(abs) >= 1e21) ||
+ bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
+ fmt = 'e'
+ }
+ }
+ dst = strconv.AppendFloat(dst, src, fmt, -1, bits)
+ if fmt == 'e' {
+ // Clean up e-09 to e-9.
+ n := len(dst)
+ if n >= 4 && dst[n-4] == 'e' && dst[n-3] == '-' && dst[n-2] == '0' {
+ dst[n-2] = dst[n-1]
+ dst = dst[:n-1]
+ }
+ }
+ return dst
+}
+
+// reformatNumber consumes a JSON number from src and appends it to dst,
+// canonicalizing it if specified.
+// It returns the appended output and the remainder of the input.
+func reformatNumber(dst, src []byte, canonicalize bool) ([]byte, []byte, error) {
+ n, err := consumeNumber(src)
+ if err != nil {
+ return dst, src[n:], err
+ }
+ if !canonicalize {
+ dst = append(dst, src[:n]...) // copy the number verbatim
+ return dst, src[n:], nil
+ }
+
+ // Canonicalize the number per RFC 8785, section 3.2.2.3.
+ // As an optimization, we can copy integer numbers below 2⁵³ verbatim.
+ const maxExactIntegerDigits = 16 // len(strconv.AppendUint(nil, 1<<53, 10))
+ if n < maxExactIntegerDigits && consumeSimpleNumber(src[:n]) == n {
+ dst = append(dst, src[:n]...) // copy the number verbatim
+ return dst, src[n:], nil
+ }
+ fv, _ := strconv.ParseFloat(string(src[:n]), 64)
+ switch {
+ case fv == 0:
+ fv = 0 // normalize negative zero as just zero
+ case math.IsInf(fv, +1):
+ fv = +math.MaxFloat64
+ case math.IsInf(fv, -1):
+ fv = -math.MaxFloat64
+ }
+ return appendNumber(dst, fv, 64), src[n:], nil
+}
+
+// appendHexUint16 appends src to dst as a 4-byte hexadecimal number.
+func appendHexUint16(dst []byte, src uint16) []byte {
+ dst = append(dst, "0000"[1+(bits.Len16(src)-1)/4:]...)
+ dst = strconv.AppendUint(dst, uint64(src), 16)
+ return dst
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/errors.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/errors.go
new file mode 100644
index 000000000..35be8601e
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/errors.go
@@ -0,0 +1,183 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package json + +import ( + "errors" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +const errorPrefix = "json: " + +// Error matches errors returned by this package according to errors.Is. +const Error = jsonError("json error") + +type jsonError string + +func (e jsonError) Error() string { + return string(e) +} +func (e jsonError) Is(target error) bool { + return e == target || target == Error +} + +type ioError struct { + action string // either "read" or "write" + err error +} + +func (e *ioError) Error() string { + return errorPrefix + e.action + " error: " + e.err.Error() +} +func (e *ioError) Unwrap() error { + return e.err +} +func (e *ioError) Is(target error) bool { + return e == target || target == Error || errors.Is(e.err, target) +} + +// SemanticError describes an error determining the meaning +// of JSON data as Go data or vice-versa. +// +// The contents of this error as produced by this package may change over time. +type SemanticError struct { + requireKeyedLiterals + nonComparable + + action string // either "marshal" or "unmarshal" + + // ByteOffset indicates that an error occurred after this byte offset. + ByteOffset int64 + // JSONPointer indicates that an error occurred within this JSON value + // as indicated using the JSON Pointer notation (see RFC 6901). + JSONPointer string + + // JSONKind is the JSON kind that could not be handled. + JSONKind Kind // may be zero if unknown + // GoType is the Go type that could not be handled. + GoType reflect.Type // may be nil if unknown + + // Err is the underlying error. + Err error // may be nil +} + +func (e *SemanticError) Error() string { + var sb strings.Builder + sb.WriteString(errorPrefix) + + // Hyrum-proof the error message by deliberately switching between + // two equivalent renderings of the same error message. + // The randomization is tied to the Hyrum-proofing already applied + // on map iteration in Go. + for phrase := range map[string]struct{}{"cannot": {}, "unable to": {}} { + sb.WriteString(phrase) + break // use whichever phrase we get in the first iteration + } + + // Format action. + var preposition string + switch e.action { + case "marshal": + sb.WriteString(" marshal") + preposition = " from" + case "unmarshal": + sb.WriteString(" unmarshal") + preposition = " into" + default: + sb.WriteString(" handle") + preposition = " with" + } + + // Format JSON kind. + var omitPreposition bool + switch e.JSONKind { + case 'n': + sb.WriteString(" JSON null") + case 'f', 't': + sb.WriteString(" JSON boolean") + case '"': + sb.WriteString(" JSON string") + case '0': + sb.WriteString(" JSON number") + case '{', '}': + sb.WriteString(" JSON object") + case '[', ']': + sb.WriteString(" JSON array") + default: + omitPreposition = true + } + + // Format Go type. + if e.GoType != nil { + if !omitPreposition { + sb.WriteString(preposition) + } + sb.WriteString(" Go value of type ") + sb.WriteString(e.GoType.String()) + } + + // Format where. + switch { + case e.JSONPointer != "": + sb.WriteString(" within JSON value at ") + sb.WriteString(strconv.Quote(e.JSONPointer)) + case e.ByteOffset > 0: + sb.WriteString(" after byte offset ") + sb.WriteString(strconv.FormatInt(e.ByteOffset, 10)) + } + + // Format underlying error. 
+ if e.Err != nil {
+ sb.WriteString(": ")
+ sb.WriteString(e.Err.Error())
+ }
+
+ return sb.String()
+}
+func (e *SemanticError) Is(target error) bool {
+ return e == target || target == Error || errors.Is(e.Err, target)
+}
+func (e *SemanticError) Unwrap() error {
+ return e.Err
+}
+
+// SyntacticError is a description of a syntactic error that occurred when
+// encoding or decoding JSON according to the grammar.
+//
+// The contents of this error as produced by this package may change over time.
+type SyntacticError struct {
+ requireKeyedLiterals
+ nonComparable
+
+ // ByteOffset indicates that an error occurred after this byte offset.
+ ByteOffset int64
+ str string
+}
+
+func (e *SyntacticError) Error() string {
+ return errorPrefix + e.str
+}
+func (e *SyntacticError) Is(target error) bool {
+ return e == target || target == Error
+}
+func (e *SyntacticError) withOffset(pos int64) error {
+ return &SyntacticError{ByteOffset: pos, str: e.str}
+}
+
+func newInvalidCharacterError(prefix []byte, where string) *SyntacticError {
+ what := quoteRune(prefix)
+ return &SyntacticError{str: "invalid character " + what + " " + where}
+}
+
+func quoteRune(b []byte) string {
+ r, n := utf8.DecodeRune(b)
+ if r == utf8.RuneError && n == 1 {
+ return `'\x` + strconv.FormatUint(uint64(b[0]), 16) + `'`
+ }
+ return strconv.QuoteRune(r)
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fields.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fields.go
new file mode 100644
index 000000000..c0ee36166
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fields.go
@@ -0,0 +1,509 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+var errIgnoredField = errors.New("ignored field")
+
+type isZeroer interface {
+ IsZero() bool
+}
+
+var isZeroerType = reflect.TypeOf((*isZeroer)(nil)).Elem()
+
+type structFields struct {
+ flattened []structField // listed in depth-first ordering
+ byActualName map[string]*structField
+ byFoldedName map[string][]*structField
+ inlinedFallback *structField
+}
+
+type structField struct {
+ id int // unique numeric ID in breadth-first ordering
+ index []int // index into a struct according to reflect.Type.FieldByIndex
+ typ reflect.Type
+ fncs *arshaler
+ isZero func(addressableValue) bool
+ isEmpty func(addressableValue) bool
+ fieldOptions
+}
+
+func makeStructFields(root reflect.Type) (structFields, *SemanticError) {
+ var fs structFields
+ fs.byActualName = make(map[string]*structField, root.NumField())
+ fs.byFoldedName = make(map[string][]*structField, root.NumField())
+
+ // ambiguous is a sentinel value to indicate that at least two fields
+ // at the same depth have the same name, and thus cancel each other out.
+ // This follows the same rules as selecting a field on embedded structs
+ // where the shallowest field takes precedence. If more than one field
+ // exists at the shallowest depth, then the selection is illegal.
+ // See https://go.dev/ref/spec#Selectors.
+ ambiguous := new(structField)
+
+ // Set up a queue for a breadth-first search.
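+ // The queue is consumed by advancing queueIndex rather than by popping
+ // elements, so existing entries remain stable as new ones are appended.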
+ var queueIndex int + type queueEntry struct { + typ reflect.Type + index []int + visitChildren bool // whether to recursively visit inlined field in this struct + } + queue := []queueEntry{{root, nil, true}} + seen := map[reflect.Type]bool{root: true} + + // Perform a breadth-first search over all reachable fields. + // This ensures that len(f.index) will be monotonically increasing. + for queueIndex < len(queue) { + qe := queue[queueIndex] + queueIndex++ + + t := qe.typ + inlinedFallbackIndex := -1 // index of last inlined fallback field in current struct + namesIndex := make(map[string]int) // index of each field with a given JSON object name in current struct + var hasAnyJSONTag bool // whether any Go struct field has a `json` tag + var hasAnyJSONField bool // whether any JSON serializable fields exist in current struct + for i := 0; i < t.NumField(); i++ { + sf := t.Field(i) + _, hasTag := sf.Tag.Lookup("json") + hasAnyJSONTag = hasAnyJSONTag || hasTag + options, err := parseFieldOptions(sf) + if err != nil { + if err == errIgnoredField { + continue + } + return structFields{}, &SemanticError{GoType: t, Err: err} + } + hasAnyJSONField = true + f := structField{ + // Allocate a new slice (len=N+1) to hold both + // the parent index (len=N) and the current index (len=1). + // Do this to avoid clobbering the memory of the parent index. + index: append(append(make([]int, 0, len(qe.index)+1), qe.index...), i), + typ: sf.Type, + fieldOptions: options, + } + if sf.Anonymous && !f.hasName { + f.inline = true // implied by use of Go embedding without an explicit name + } + if f.inline || f.unknown { + // Handle an inlined field that serializes to/from + // zero or more JSON object members. + + if f.inline && f.unknown { + err := fmt.Errorf("Go struct field %s cannot have both `inline` and `unknown` specified", sf.Name) + return structFields{}, &SemanticError{GoType: t, Err: err} + } + switch f.fieldOptions { + case fieldOptions{name: f.name, quotedName: f.quotedName, inline: true}: + case fieldOptions{name: f.name, quotedName: f.quotedName, unknown: true}: + default: + err := fmt.Errorf("Go struct field %s cannot have any options other than `inline` or `unknown` specified", sf.Name) + return structFields{}, &SemanticError{GoType: t, Err: err} + } + + // Unwrap one level of pointer indirection similar to how Go + // only allows embedding either T or *T, but not **T. + tf := f.typ + if tf.Kind() == reflect.Pointer && tf.Name() == "" { + tf = tf.Elem() + } + // Reject any types with custom serialization otherwise + // it becomes impossible to know what sub-fields to inline. + if which, _ := implementsWhich(tf, + jsonMarshalerV2Type, jsonMarshalerV1Type, textMarshalerType, + jsonUnmarshalerV2Type, jsonUnmarshalerV1Type, textUnmarshalerType, + ); which != nil && tf != rawValueType { + err := fmt.Errorf("inlined Go struct field %s of type %s must not implement JSON marshal or unmarshal methods", sf.Name, tf) + return structFields{}, &SemanticError{GoType: t, Err: err} + } + + // Handle an inlined field that serializes to/from + // a finite number of JSON object members backed by a Go struct. 
+ if tf.Kind() == reflect.Struct {
+ if f.unknown {
+ err := fmt.Errorf("inlined Go struct field %s of type %s with `unknown` tag must be a Go map of string key or a json.RawValue", sf.Name, tf)
+ return structFields{}, &SemanticError{GoType: t, Err: err}
+ }
+ if qe.visitChildren {
+ queue = append(queue, queueEntry{tf, f.index, !seen[tf]})
+ }
+ seen[tf] = true
+ continue
+ }
+
+ // Handle an inlined field that serializes to/from any number of
+ // JSON object members backed by a Go map or RawValue.
+ switch {
+ case tf == rawValueType:
+ f.fncs = nil // specially handled in arshal_inlined.go
+ case tf.Kind() == reflect.Map && tf.Key() == stringType:
+ f.fncs = lookupArshaler(tf.Elem())
+ default:
+ err := fmt.Errorf("inlined Go struct field %s of type %s must be a Go struct, Go map of string key, or json.RawValue", sf.Name, tf)
+ return structFields{}, &SemanticError{GoType: t, Err: err}
+ }
+
+ // Reject multiple inlined fallback fields within the same struct.
+ if inlinedFallbackIndex >= 0 {
+ err := fmt.Errorf("inlined Go struct fields %s and %s cannot both be a Go map or json.RawValue", t.Field(inlinedFallbackIndex).Name, sf.Name)
+ return structFields{}, &SemanticError{GoType: t, Err: err}
+ }
+ inlinedFallbackIndex = i
+
+ // Multiple inlined fallback fields across different structs
+ // follow the same precedence rules as Go struct embedding.
+ if fs.inlinedFallback == nil {
+ fs.inlinedFallback = &f // store first occurrence at lowest depth
+ } else if len(fs.inlinedFallback.index) == len(f.index) {
+ fs.inlinedFallback = ambiguous // at least two occurrences at same depth
+ }
+ } else {
+ // Handle normal Go struct field that serializes to/from
+ // a single JSON object member.
+
+ // Provide a function that uses a type's IsZero method.
+ switch {
+ case sf.Type.Kind() == reflect.Interface && sf.Type.Implements(isZeroerType):
+ f.isZero = func(va addressableValue) bool {
+ // Avoid panics calling IsZero on a nil interface or
+ // non-nil interface with nil pointer.
+ return va.IsNil() || (va.Elem().Kind() == reflect.Pointer && va.Elem().IsNil()) || va.Interface().(isZeroer).IsZero()
+ }
+ case sf.Type.Kind() == reflect.Pointer && sf.Type.Implements(isZeroerType):
+ f.isZero = func(va addressableValue) bool {
+ // Avoid panics calling IsZero on nil pointer.
+ return va.IsNil() || va.Interface().(isZeroer).IsZero()
+ }
+ case sf.Type.Implements(isZeroerType):
+ f.isZero = func(va addressableValue) bool { return va.Interface().(isZeroer).IsZero() }
+ case reflect.PointerTo(sf.Type).Implements(isZeroerType):
+ f.isZero = func(va addressableValue) bool { return va.Addr().Interface().(isZeroer).IsZero() }
+ }
+
+ // Provide a function that can determine whether the value would
+ // serialize as an empty JSON value.
+ switch sf.Type.Kind() {
+ case reflect.String, reflect.Map, reflect.Array, reflect.Slice:
+ f.isEmpty = func(va addressableValue) bool { return va.Len() == 0 }
+ case reflect.Pointer, reflect.Interface:
+ f.isEmpty = func(va addressableValue) bool { return va.IsNil() }
+ }
+
+ f.id = len(fs.flattened)
+ f.fncs = lookupArshaler(sf.Type)
+ fs.flattened = append(fs.flattened, f)
+
+ // Reject user-specified names with invalid UTF-8.
+ if !utf8.ValidString(f.name) {
+ err := fmt.Errorf("Go struct field %s has JSON object name %q with invalid UTF-8", sf.Name, f.name)
+ return structFields{}, &SemanticError{GoType: t, Err: err}
+ }
+ // Reject multiple fields with same name within the same struct.
+ if j, ok := namesIndex[f.name]; ok { + err := fmt.Errorf("Go struct fields %s and %s conflict over JSON object name %q", t.Field(j).Name, sf.Name, f.name) + return structFields{}, &SemanticError{GoType: t, Err: err} + } + namesIndex[f.name] = i + + // Multiple fields of the same name across different structs + // follow the same precedence rules as Go struct embedding. + if f2 := fs.byActualName[f.name]; f2 == nil { + fs.byActualName[f.name] = &fs.flattened[len(fs.flattened)-1] // store first occurrence at lowest depth + } else if len(f2.index) == len(f.index) { + fs.byActualName[f.name] = ambiguous // at least two occurrences at same depth + } + } + } + + // NOTE: New users to the json package are occasionally surprised that + // unexported fields are ignored. This occurs by necessity due to our + // inability to directly introspect such fields with Go reflection + // without the use of unsafe. + // + // To reduce friction here, refuse to serialize any Go struct that + // has no JSON serializable fields, has at least one Go struct field, + // and does not have any `json` tags present. For example, + // errors returned by errors.New would fail to serialize. + isEmptyStruct := t.NumField() == 0 + if !isEmptyStruct && !hasAnyJSONTag && !hasAnyJSONField { + err := errors.New("Go struct has no exported fields") + return structFields{}, &SemanticError{GoType: t, Err: err} + } + } + + // Remove all fields that are duplicates. + // This may move elements forward to fill the holes from removed fields. + var n int + for _, f := range fs.flattened { + switch f2 := fs.byActualName[f.name]; { + case f2 == ambiguous: + delete(fs.byActualName, f.name) + case f2 == nil: + continue // may be nil due to previous delete + // TODO(https://go.dev/issue/45955): Use slices.Equal. + case reflect.DeepEqual(f.index, f2.index): + f.id = n + fs.flattened[n] = f + fs.byActualName[f.name] = &fs.flattened[n] // fix pointer to new location + n++ + } + } + fs.flattened = fs.flattened[:n] + if fs.inlinedFallback == ambiguous { + fs.inlinedFallback = nil + } + if len(fs.flattened) != len(fs.byActualName) { + panic(fmt.Sprintf("BUG: flattened list of fields mismatches fields mapped by name: %d != %d", len(fs.flattened), len(fs.byActualName))) + } + + // Sort the fields according to a depth-first ordering. + // This operation will cause pointers in byActualName to become incorrect, + // which we will correct in another loop shortly thereafter. + sort.Slice(fs.flattened, func(i, j int) bool { + si := fs.flattened[i].index + sj := fs.flattened[j].index + for len(si) > 0 && len(sj) > 0 { + switch { + case si[0] < sj[0]: + return true + case si[0] > sj[0]: + return false + default: + si = si[1:] + sj = sj[1:] + } + } + return len(si) < len(sj) + }) + + // Recompute the mapping of fields in the byActualName map. + // Pre-fold all names so that we can lookup folded names quickly. + for i, f := range fs.flattened { + foldedName := string(foldName([]byte(f.name))) + fs.byActualName[f.name] = &fs.flattened[i] + fs.byFoldedName[foldedName] = append(fs.byFoldedName[foldedName], &fs.flattened[i]) + } + for foldedName, fields := range fs.byFoldedName { + if len(fields) > 1 { + // The precedence order for conflicting nocase names + // is by breadth-first order, rather than depth-first order. 
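+ // Since ids were assigned in breadth-first order above,
+ // sorting by id restores that precedence.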
+ sort.Slice(fields, func(i, j int) bool { + return fields[i].id < fields[j].id + }) + fs.byFoldedName[foldedName] = fields + } + } + + return fs, nil +} + +type fieldOptions struct { + name string + quotedName string // quoted name per RFC 8785, section 3.2.2.2. + hasName bool + nocase bool + inline bool + unknown bool + omitzero bool + omitempty bool + string bool + format string +} + +// parseFieldOptions parses the `json` tag in a Go struct field as +// a structured set of options configuring parameters such as +// the JSON member name and other features. +// As a special case, it returns errIgnoredField if the field is ignored. +func parseFieldOptions(sf reflect.StructField) (out fieldOptions, err error) { + tag, hasTag := sf.Tag.Lookup("json") + + // Check whether this field is explicitly ignored. + if tag == "-" { + return fieldOptions{}, errIgnoredField + } + + // Check whether this field is unexported. + if !sf.IsExported() { + // In contrast to v1, v2 no longer forwards exported fields from + // embedded fields of unexported types since Go reflection does not + // allow the same set of operations that are available in normal cases + // of purely exported fields. + // See https://go.dev/issue/21357 and https://go.dev/issue/24153. + if sf.Anonymous { + return fieldOptions{}, fmt.Errorf("embedded Go struct field %s of an unexported type must be explicitly ignored with a `json:\"-\"` tag", sf.Type.Name()) + } + // Tag options specified on an unexported field suggests user error. + if hasTag { + return fieldOptions{}, fmt.Errorf("unexported Go struct field %s cannot have non-ignored `json:%q` tag", sf.Name, tag) + } + return fieldOptions{}, errIgnoredField + } + + // Determine the JSON member name for this Go field. A user-specified name + // may be provided as either an identifier or a single-quoted string. + // The single-quoted string allows arbitrary characters in the name. + // See https://go.dev/issue/2718 and https://go.dev/issue/3546. + out.name = sf.Name // always starts with an uppercase character + if len(tag) > 0 && !strings.HasPrefix(tag, ",") { + // For better compatibility with v1, accept almost any unescaped name. + n := len(tag) - len(strings.TrimLeftFunc(tag, func(r rune) bool { + return !strings.ContainsRune(",\\'\"`", r) // reserve comma, backslash, and quotes + })) + opt := tag[:n] + if n == 0 { + // Allow a single quoted string for arbitrary names. + opt, n, err = consumeTagOption(tag) + if err != nil { + return fieldOptions{}, fmt.Errorf("Go struct field %s has malformed `json` tag: %v", sf.Name, err) + } + } + out.hasName = true + out.name = opt + tag = tag[n:] + } + b, _ := appendString(nil, out.name, false, nil) + out.quotedName = string(b) + + // Handle any additional tag options (if any). + var wasFormat bool + seenOpts := make(map[string]bool) + for len(tag) > 0 { + // Consume comma delimiter. + if tag[0] != ',' { + return fieldOptions{}, fmt.Errorf("Go struct field %s has malformed `json` tag: invalid character %q before next option (expecting ',')", sf.Name, tag[0]) + } + tag = tag[len(","):] + if len(tag) == 0 { + return fieldOptions{}, fmt.Errorf("Go struct field %s has malformed `json` tag: invalid trailing ',' character", sf.Name) + } + + // Consume and process the tag option. 
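+ // For illustration, a hypothetical tag `json:"name,omitempty,format:RFC3339"`
+ // is consumed one option at a time: "omitempty", then "format" with "RFC3339".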
+ opt, n, err := consumeTagOption(tag) + if err != nil { + return fieldOptions{}, fmt.Errorf("Go struct field %s has malformed `json` tag: %v", sf.Name, err) + } + rawOpt := tag[:n] + tag = tag[n:] + switch { + case wasFormat: + return fieldOptions{}, fmt.Errorf("Go struct field %s has `format` tag option that was not specified last", sf.Name) + case strings.HasPrefix(rawOpt, "'") && strings.TrimFunc(opt, isLetterOrDigit) == "": + return fieldOptions{}, fmt.Errorf("Go struct field %s has unnecessarily quoted appearance of `%s` tag option; specify `%s` instead", sf.Name, rawOpt, opt) + } + switch opt { + case "nocase": + out.nocase = true + case "inline": + out.inline = true + case "unknown": + out.unknown = true + case "omitzero": + out.omitzero = true + case "omitempty": + out.omitempty = true + case "string": + out.string = true + case "format": + if !strings.HasPrefix(tag, ":") { + return fieldOptions{}, fmt.Errorf("Go struct field %s is missing value for `format` tag option", sf.Name) + } + tag = tag[len(":"):] + opt, n, err := consumeTagOption(tag) + if err != nil { + return fieldOptions{}, fmt.Errorf("Go struct field %s has malformed value for `format` tag option: %v", sf.Name, err) + } + tag = tag[n:] + out.format = opt + wasFormat = true + default: + // Reject keys that resemble one of the supported options. + // This catches invalid mutants such as "omitEmpty" or "omit_empty". + normOpt := strings.ReplaceAll(strings.ToLower(opt), "_", "") + switch normOpt { + case "nocase", "inline", "unknown", "omitzero", "omitempty", "string", "format": + return fieldOptions{}, fmt.Errorf("Go struct field %s has invalid appearance of `%s` tag option; specify `%s` instead", sf.Name, opt, normOpt) + } + + // NOTE: Everything else is ignored. This does not mean it is + // forward compatible to insert arbitrary tag options since + // a future version of this package may understand that tag. + } + + // Reject duplicates. + if seenOpts[opt] { + return fieldOptions{}, fmt.Errorf("Go struct field %s has duplicate appearance of `%s` tag option", sf.Name, rawOpt) + } + seenOpts[opt] = true + } + return out, nil +} + +func consumeTagOption(in string) (string, int, error) { + switch r, _ := utf8.DecodeRuneInString(in); { + // Option as a Go identifier. + case r == '_' || unicode.IsLetter(r): + n := len(in) - len(strings.TrimLeftFunc(in, isLetterOrDigit)) + return in[:n], n, nil + // Option as a single-quoted string. + case r == '\'': + // The grammar is nearly identical to a double-quoted Go string literal, + // but uses single quotes as the terminators. The reason for a custom + // grammar is because both backtick and double quotes cannot be used + // verbatim in a struct tag. + // + // Convert a single-quoted string to a double-quote string and rely on + // strconv.Unquote to handle the rest. + var inEscape bool + b := []byte{'"'} + n := len(`'`) + for len(in) > n { + r, rn := utf8.DecodeRuneInString(in[n:]) + switch { + case inEscape: + if r == '\'' { + b = b[:len(b)-1] // remove escape character: `\'` => `'` + } + inEscape = false + case r == '\\': + inEscape = true + case r == '"': + b = append(b, '\\') // insert escape character: `"` => `\"` + case r == '\'': + b = append(b, '"') + n += len(`'`) + out, err := strconv.Unquote(string(b)) + if err != nil { + return "", 0, fmt.Errorf("invalid single-quoted string: %s", in[:n]) + } + return out, n, nil + } + b = append(b, in[n:][:rn]...) 
+			n += rn
+		}
+		if n > 10 {
+			n = 10 // limit the amount of context printed in the error
+		}
+		return "", 0, fmt.Errorf("single-quoted string not terminated: %s...", in[:n])
+	case len(in) == 0:
+		return "", 0, io.ErrUnexpectedEOF
+	default:
+		return "", 0, fmt.Errorf("invalid character %q at start of option (expecting Unicode letter or single quote)", r)
+	}
+}
+
+func isLetterOrDigit(r rune) bool {
+	return r == '_' || unicode.IsLetter(r) || unicode.IsNumber(r)
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fold.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fold.go
new file mode 100644
index 000000000..9ab735814
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/fold.go
@@ -0,0 +1,56 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"unicode"
+	"unicode/utf8"
+)
+
+// foldName returns a folded string such that foldName(x) == foldName(y)
+// is similar to strings.EqualFold(x, y), but ignores underscores and dashes.
+// This allows foldName to match common naming conventions.
+func foldName(in []byte) []byte {
+	// This is inlinable to take advantage of "function outlining".
+	// See https://blog.filippo.io/efficient-go-apis-with-the-inliner/
+	var arr [32]byte // large enough for most JSON names
+	return appendFoldedName(arr[:0], in)
+}
+func appendFoldedName(out, in []byte) []byte {
+	for i := 0; i < len(in); {
+		// Handle single-byte ASCII.
+		if c := in[i]; c < utf8.RuneSelf {
+			if c != '_' && c != '-' {
+				if 'a' <= c && c <= 'z' {
+					c -= 'a' - 'A'
+				}
+				out = append(out, c)
+			}
+			i++
+			continue
+		}
+		// Handle multi-byte Unicode.
+		r, n := utf8.DecodeRune(in[i:])
+		out = utf8.AppendRune(out, foldRune(r))
+		i += n
+	}
+	return out
+}
+
+// foldRune is a variation on unicode.SimpleFold that returns the same rune
+// for all runes in the same fold set.
+//
+// Invariant:
+//
+//	foldRune(x) == foldRune(y) ⇔ strings.EqualFold(string(x), string(y))
+func foldRune(r rune) rune {
+	for {
+		r2 := unicode.SimpleFold(r)
+		if r2 <= r {
+			return r2 // smallest character in the fold set
+		}
+		r = r2
+	}
+}
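The folding above is what lets `nocase` matching treat snake_case, kebab-case, and CamelCase spellings of the same name as equal. A standalone sketch of the same idea using only the standard library (the helper names here are invented for illustration and are not part of the vendored file):

    package main

    import (
        "fmt"
        "strings"
        "unicode"
    )

    // canonicalFold maps r to the smallest rune in its Unicode fold set,
    // mirroring foldRune above.
    func canonicalFold(r rune) rune {
        for {
            r2 := unicode.SimpleFold(r)
            if r2 <= r {
                return r2
            }
            r = r2
        }
    }

    // foldKey drops '_' and '-' and folds every remaining rune, so common
    // naming conventions collide onto the same key.
    func foldKey(s string) string {
        var b strings.Builder
        for _, r := range s {
            if r != '_' && r != '-' {
                b.WriteRune(canonicalFold(r))
            }
        }
        return b.String()
    }

    func main() {
        fmt.Println(foldKey("first_name") == foldKey("FirstName")) // true
        fmt.Println(foldKey("FIRST-NAME") == foldKey("firstname")) // true
    }

diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/intern.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/intern.go
new file mode 100644
index 000000000..700a56db0
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/intern.go
@@ -0,0 +1,86 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"encoding/binary"
+	"math/bits"
+)
+
+// stringCache is a cache for strings converted from a []byte.
+type stringCache [256]string // 256*unsafe.Sizeof(string("")) => 4KiB
+
+// make returns the string form of b.
+// It returns a pre-allocated string from c if present, otherwise
+// it allocates a new string, inserts it into the cache, and returns it.
+func (c *stringCache) make(b []byte) string {
+	const (
+		minCachedLen = 2   // single byte strings are already interned by the runtime
+		maxCachedLen = 256 // large enough for UUIDs, IPv6 addresses, SHA-256 checksums, etc.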
+	)
+	if c == nil || len(b) < minCachedLen || len(b) > maxCachedLen {
+		return string(b)
+	}
+
+	// Compute a hash from the fixed-width prefix and suffix of the string.
+	// This ensures hashing a string is a constant-time operation.
+	var h uint32
+	switch {
+	case len(b) >= 8:
+		lo := binary.LittleEndian.Uint64(b[:8])
+		hi := binary.LittleEndian.Uint64(b[len(b)-8:])
+		h = hash64(uint32(lo), uint32(lo>>32)) ^ hash64(uint32(hi), uint32(hi>>32))
+	case len(b) >= 4:
+		lo := binary.LittleEndian.Uint32(b[:4])
+		hi := binary.LittleEndian.Uint32(b[len(b)-4:])
+		h = hash64(lo, hi)
+	case len(b) >= 2:
+		lo := binary.LittleEndian.Uint16(b[:2])
+		hi := binary.LittleEndian.Uint16(b[len(b)-2:])
+		h = hash64(uint32(lo), uint32(hi))
+	}
+
+	// Check the cache for the string.
+	i := h % uint32(len(*c))
+	if s := (*c)[i]; s == string(b) {
+		return s
+	}
+	s := string(b)
+	(*c)[i] = s
+	return s
+}
+
+// hash64 returns the hash of two uint32s as a single uint32.
func hash64(lo, hi uint32) uint32 {
+	// If avalanche=true, this is identical to XXH32 hash on an 8B string:
+	//	var b [8]byte
+	//	binary.LittleEndian.PutUint32(b[:4], lo)
+	//	binary.LittleEndian.PutUint32(b[4:], hi)
+	//	return xxhash.Sum32(b[:])
+	const (
+		prime1 = 0x9e3779b1
+		prime2 = 0x85ebca77
+		prime3 = 0xc2b2ae3d
+		prime4 = 0x27d4eb2f
+		prime5 = 0x165667b1
+	)
+	h := prime5 + uint32(8)
+	h += lo * prime3
+	h = bits.RotateLeft32(h, 17) * prime4
+	h += hi * prime3
+	h = bits.RotateLeft32(h, 17) * prime4
+	// Skip final mix (avalanche) step of XXH32 for performance reasons.
+	// Empirical testing shows that the improvements in unbiased distribution
+	// do not outweigh the extra cost in computational complexity.
+	const avalanche = false
+	if avalanche {
+		h ^= h >> 15
+		h *= prime2
+		h ^= h >> 13
+		h *= prime3
+		h ^= h >> 16
+	}
+	return h
+}
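The cache above is deliberately lossy: only a fixed-width prefix and suffix are hashed, and a collision simply overwrites the slot, so the structure can never grow. A simplified standalone sketch of the same shape, substituting hash/fnv for the XXH32-style mix (type and method names here are illustrative):

    package main

    import (
        "fmt"
        "hash/fnv"
    )

    // internCache is a fixed-size, lossy string cache: a colliding entry
    // overwrites its slot, trading hit rate for a hard memory bound.
    type internCache [256]string

    func (c *internCache) make(b []byte) string {
        h := fnv.New32a()
        h.Write(b)
        i := h.Sum32() % uint32(len(c))
        if s := c[i]; s == string(b) { // comparison alone does not allocate
            return s
        }
        s := string(b) // allocate once; future identical inputs reuse it
        c[i] = s
        return s
    }

    func main() {
        var c internCache
        a := c.make([]byte("content-type"))
        b := c.make([]byte("content-type"))
        fmt.Println(a == b) // true: the second call hit the cache
    }

diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go
new file mode 100644
index 000000000..60e93270f
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go
@@ -0,0 +1,182 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"bytes"
+	"io"
+	"math/bits"
+	"sort"
+	"sync"
+)
+
+// TODO(https://go.dev/issue/47657): Use sync.PoolOf.
+
+var (
+	// This owns the internal buffer since there is no io.Writer to output to.
+	// Since the buffer can get arbitrarily large in normal usage,
+	// there is statistical tracking logic to determine whether to recycle
+	// the internal buffer or not based on a history of utilization.
+	bufferedEncoderPool = &sync.Pool{New: func() any { return new(Encoder) }}
+
+	// This owns the internal buffer, but it is only used to temporarily store
+	// buffered JSON before flushing it to the underlying io.Writer.
+	// In a sufficiently efficient streaming mode, we do not expect the buffer
+	// to grow arbitrarily large. Thus, we avoid recycling large buffers.
+	streamingEncoderPool = &sync.Pool{New: func() any { return new(Encoder) }}
+
+	// This does not own the internal buffer since
+	// it is taken directly from the provided bytes.Buffer.
+	bytesBufferEncoderPool = &sync.Pool{New: func() any { return new(Encoder) }}
+)
+
+// bufferStatistics tracks statistics about buffer utilization.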
+// It is used to determine whether to recycle a buffer or not
+// to avoid https://go.dev/issue/23199.
+type bufferStatistics struct {
+	strikes int // number of times the buffer was under-utilized
+	prevLen int // length of previous buffer
+}
+
+func getBufferedEncoder(o EncodeOptions) *Encoder {
+	e := bufferedEncoderPool.Get().(*Encoder)
+	if e.buf == nil {
+		// Round up to nearest 2ⁿ to make best use of malloc size classes.
+		// See runtime/sizeclasses.go on Go1.15.
+		// Logical OR with 63 to ensure 64 as the minimum buffer size.
+		n := 1 << bits.Len(uint(e.bufStats.prevLen|63))
+		e.buf = make([]byte, 0, n)
+	}
+	e.reset(e.buf[:0], nil, o)
+	return e
+}
+func putBufferedEncoder(e *Encoder) {
+	// Recycle large buffers only if sufficiently utilized.
+	// If a buffer is under-utilized enough times sequentially,
+	// then it is discarded, ensuring that a single large buffer
+	// won't be kept alive by a continuous stream of small usages.
+	//
+	// The worst-case utilization is computed as:
+	//	MIN_UTILIZATION_THRESHOLD / (1 + MAX_NUM_STRIKES)
+	//
+	// For the constants chosen below, this is (25%)/(1+4) ⇒ 5%.
+	// This may seem low, but it ensures a lower bound on
+	// the absolute worst-case utilization. Without this check,
+	// this would be theoretically 0%, which is infinitely worse.
+	//
+	// See https://go.dev/issue/27735.
+	switch {
+	case cap(e.buf) <= 4<<10: // always recycle buffers smaller than 4KiB
+		e.bufStats.strikes = 0
+	case cap(e.buf)/4 <= len(e.buf): // at least 25% utilization
+		e.bufStats.strikes = 0
+	case e.bufStats.strikes < 4: // at most 4 strikes
+		e.bufStats.strikes++
+	default: // discard the buffer; too large and too often under-utilized
+		e.bufStats.strikes = 0
+		e.bufStats.prevLen = len(e.buf) // heuristic for size to allocate next time
+		e.buf = nil
+	}
+	bufferedEncoderPool.Put(e)
+}
+
+func getStreamingEncoder(w io.Writer, o EncodeOptions) *Encoder {
+	if _, ok := w.(*bytes.Buffer); ok {
+		e := bytesBufferEncoderPool.Get().(*Encoder)
+		e.reset(nil, w, o) // buffer taken from bytes.Buffer
+		return e
+	} else {
+		e := streamingEncoderPool.Get().(*Encoder)
+		e.reset(e.buf[:0], w, o) // preserve existing buffer
+		return e
+	}
+}
+func putStreamingEncoder(e *Encoder) {
+	if _, ok := e.wr.(*bytes.Buffer); ok {
+		bytesBufferEncoderPool.Put(e)
+	} else {
+		if cap(e.buf) > 64<<10 {
+			e.buf = nil // avoid pinning arbitrarily large amounts of memory
+		}
+		streamingEncoderPool.Put(e)
+	}
+}
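putBufferedEncoder's recycling policy reads naturally as a small pure function: small buffers are always pooled, a well-utilized buffer clears the strike count, and several consecutive under-utilized uses drop the buffer so the pool cannot pin a large allocation forever. A distilled sketch with the same constants (the function name is invented; this is not the vendored code):

    // recyclable reports whether buf should go back into a sync.Pool,
    // given a persistent strike counter.
    func recyclable(buf []byte, strikes *int) bool {
        switch {
        case cap(buf) <= 4<<10: // small buffers are always recycled
            *strikes = 0
            return true
        case cap(buf)/4 <= len(buf): // at least 25% utilized
            *strikes = 0
            return true
        case *strikes < 4: // tolerate up to 4 under-utilized uses in a row
            *strikes++
            return true
        default:
            *strikes = 0
            return false // drop the buffer and let the GC reclaim it
        }
    }

+var (
+	// This does not own the internal buffer since it is externally provided.
+	bufferedDecoderPool = &sync.Pool{New: func() any { return new(Decoder) }}
+
+	// This owns the internal buffer, but it is only used to temporarily store
+	// buffered JSON fetched from the underlying io.Reader.
+	// In a sufficiently efficient streaming mode, we do not expect the buffer
+	// to grow arbitrarily large. Thus, we avoid recycling large buffers.
+	streamingDecoderPool = &sync.Pool{New: func() any { return new(Decoder) }}
+
+	// This does not own the internal buffer since
+	// it is taken directly from the provided bytes.Buffer.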
+ bytesBufferDecoderPool = bufferedDecoderPool +) + +func getBufferedDecoder(b []byte, o DecodeOptions) *Decoder { + d := bufferedDecoderPool.Get().(*Decoder) + d.reset(b, nil, o) + return d +} +func putBufferedDecoder(d *Decoder) { + bufferedDecoderPool.Put(d) +} + +func getStreamingDecoder(r io.Reader, o DecodeOptions) *Decoder { + if _, ok := r.(*bytes.Buffer); ok { + d := bytesBufferDecoderPool.Get().(*Decoder) + d.reset(nil, r, o) // buffer taken from bytes.Buffer + return d + } else { + d := streamingDecoderPool.Get().(*Decoder) + d.reset(d.buf[:0], r, o) // preserve existing buffer + return d + } +} +func putStreamingDecoder(d *Decoder) { + if _, ok := d.rd.(*bytes.Buffer); ok { + bytesBufferDecoderPool.Put(d) + } else { + if cap(d.buf) > 64<<10 { + d.buf = nil // avoid pinning arbitrarily large amounts of memory + } + streamingDecoderPool.Put(d) + } +} + +var stringsPools = &sync.Pool{New: func() any { return new(stringSlice) }} + +type stringSlice []string + +// getStrings returns a non-nil pointer to a slice with length n. +func getStrings(n int) *stringSlice { + s := stringsPools.Get().(*stringSlice) + if cap(*s) < n { + *s = make([]string, n) + } + *s = (*s)[:n] + return s +} + +func putStrings(s *stringSlice) { + if cap(*s) > 1<<10 { + *s = nil // avoid pinning arbitrarily large amounts of memory + } + stringsPools.Put(s) +} + +// Sort sorts the string slice according to RFC 8785, section 3.2.3. +func (ss *stringSlice) Sort() { + // TODO(https://go.dev/issue/47619): Use slices.SortFunc instead. + sort.Sort(ss) +} + +func (ss *stringSlice) Len() int { return len(*ss) } +func (ss *stringSlice) Less(i, j int) bool { return lessUTF16((*ss)[i], (*ss)[j]) } +func (ss *stringSlice) Swap(i, j int) { (*ss)[i], (*ss)[j] = (*ss)[j], (*ss)[i] } diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go new file mode 100644 index 000000000..ee14c753f --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go @@ -0,0 +1,747 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "math" + "strconv" +) + +var ( + errMissingName = &SyntacticError{str: "missing string for object name"} + errMissingColon = &SyntacticError{str: "missing character ':' after object name"} + errMissingValue = &SyntacticError{str: "missing value after object name"} + errMissingComma = &SyntacticError{str: "missing character ',' after object or array value"} + errMismatchDelim = &SyntacticError{str: "mismatching structural token for object or array"} +) + +const errInvalidNamespace = jsonError("object namespace is in an invalid state") + +type state struct { + // tokens validates whether the next token kind is valid. + tokens stateMachine + + // names is a stack of object names. + // Not used if AllowDuplicateNames is true. + names objectNameStack + + // namespaces is a stack of object namespaces. + // For performance reasons, Encoder or Decoder may not update this + // if Marshal or Unmarshal is able to track names in a more efficient way. + // See makeMapArshaler and makeStructArshaler. + // Not used if AllowDuplicateNames is true. 
+ namespaces objectNamespaceStack +} + +func (s *state) reset() { + s.tokens.reset() + s.names.reset() + s.namespaces.reset() +} + +// appendStackPointer appends a JSON Pointer (RFC 6901) to the current value. +// The returned pointer is only accurate if s.names is populated, +// otherwise it uses the numeric index as the object member name. +// +// Invariant: Must call s.names.copyQuotedBuffer beforehand. +func (s state) appendStackPointer(b []byte) []byte { + var objectDepth int + for i := 1; i < s.tokens.depth(); i++ { + e := s.tokens.index(i) + if e.length() == 0 { + break // empty object or array + } + b = append(b, '/') + switch { + case e.isObject(): + if objectDepth < s.names.length() { + for _, c := range s.names.getUnquoted(objectDepth) { + // Per RFC 6901, section 3, escape '~' and '/' characters. + switch c { + case '~': + b = append(b, "~0"...) + case '/': + b = append(b, "~1"...) + default: + b = append(b, c) + } + } + } else { + // Since the names stack is unpopulated, the name is unknown. + // As a best-effort replacement, use the numeric member index. + // While inaccurate, it produces a syntactically valid pointer. + b = strconv.AppendUint(b, uint64((e.length()-1)/2), 10) + } + objectDepth++ + case e.isArray(): + b = strconv.AppendUint(b, uint64(e.length()-1), 10) + } + } + return b +} + +// stateMachine is a push-down automaton that validates whether +// a sequence of tokens is valid or not according to the JSON grammar. +// It is useful for both encoding and decoding. +// +// It is a stack where each entry represents a nested JSON object or array. +// The stack has a minimum depth of 1 where the first level is a +// virtual JSON array to handle a stream of top-level JSON values. +// The top-level virtual JSON array is special in that it doesn't require commas +// between each JSON value. +// +// For performance, most methods are carefully written to be inlineable. +// The zero value is a valid state machine ready for use. +type stateMachine struct { + stack []stateEntry + last stateEntry +} + +// reset resets the state machine. +// The machine always starts with a minimum depth of 1. +func (m *stateMachine) reset() { + m.stack = m.stack[:0] + if cap(m.stack) > 1<<10 { + m.stack = nil + } + m.last = stateTypeArray +} + +// depth is the current nested depth of JSON objects and arrays. +// It is one-indexed (i.e., top-level values have a depth of 1). +func (m stateMachine) depth() int { + return len(m.stack) + 1 +} + +// index returns a reference to the ith entry. +// It is only valid until the next push method call. +func (m *stateMachine) index(i int) *stateEntry { + if i == len(m.stack) { + return &m.last + } + return &m.stack[i] +} + +// depthLength reports the current nested depth and +// the length of the last JSON object or array. +func (m stateMachine) depthLength() (int, int) { + return m.depth(), m.last.length() +} + +// appendLiteral appends a JSON literal as the next token in the sequence. +// If an error is returned, the state is not mutated. +func (m *stateMachine) appendLiteral() error { + switch { + case m.last.needObjectName(): + return errMissingName + case !m.last.isValidNamespace(): + return errInvalidNamespace + default: + m.last.increment() + return nil + } +} + +// appendString appends a JSON string as the next token in the sequence. +// If an error is returned, the state is not mutated. 
+func (m *stateMachine) appendString() error { + switch { + case !m.last.isValidNamespace(): + return errInvalidNamespace + default: + m.last.increment() + return nil + } +} + +// appendNumber appends a JSON number as the next token in the sequence. +// If an error is returned, the state is not mutated. +func (m *stateMachine) appendNumber() error { + return m.appendLiteral() +} + +// pushObject appends a JSON start object token as next in the sequence. +// If an error is returned, the state is not mutated. +func (m *stateMachine) pushObject() error { + switch { + case m.last.needObjectName(): + return errMissingName + case !m.last.isValidNamespace(): + return errInvalidNamespace + default: + m.last.increment() + m.stack = append(m.stack, m.last) + m.last = stateTypeObject + return nil + } +} + +// popObject appends a JSON end object token as next in the sequence. +// If an error is returned, the state is not mutated. +func (m *stateMachine) popObject() error { + switch { + case !m.last.isObject(): + return errMismatchDelim + case m.last.needObjectValue(): + return errMissingValue + case !m.last.isValidNamespace(): + return errInvalidNamespace + default: + m.last = m.stack[len(m.stack)-1] + m.stack = m.stack[:len(m.stack)-1] + return nil + } +} + +// pushArray appends a JSON start array token as next in the sequence. +// If an error is returned, the state is not mutated. +func (m *stateMachine) pushArray() error { + switch { + case m.last.needObjectName(): + return errMissingName + case !m.last.isValidNamespace(): + return errInvalidNamespace + default: + m.last.increment() + m.stack = append(m.stack, m.last) + m.last = stateTypeArray + return nil + } +} + +// popArray appends a JSON end array token as next in the sequence. +// If an error is returned, the state is not mutated. +func (m *stateMachine) popArray() error { + switch { + case !m.last.isArray() || len(m.stack) == 0: // forbid popping top-level virtual JSON array + return errMismatchDelim + case !m.last.isValidNamespace(): + return errInvalidNamespace + default: + m.last = m.stack[len(m.stack)-1] + m.stack = m.stack[:len(m.stack)-1] + return nil + } +} + +// needIndent reports whether indent whitespace should be injected. +// A zero value means that no whitespace should be injected. +// A positive value means '\n', indentPrefix, and (n-1) copies of indentBody +// should be appended to the output immediately before the next token. +func (m stateMachine) needIndent(next Kind) (n int) { + willEnd := next == '}' || next == ']' + switch { + case m.depth() == 1: + return 0 // top-level values are never indented + case m.last.length() == 0 && willEnd: + return 0 // an empty object or array is never indented + case m.last.length() == 0 || m.last.needImplicitComma(next): + return m.depth() + case willEnd: + return m.depth() - 1 + default: + return 0 + } +} + +// mayAppendDelim appends a colon or comma that may precede the next token. +func (m stateMachine) mayAppendDelim(b []byte, next Kind) []byte { + switch { + case m.last.needImplicitColon(): + return append(b, ':') + case m.last.needImplicitComma(next) && len(m.stack) != 0: // comma not needed for top-level values + return append(b, ',') + default: + return b + } +} + +// needDelim reports whether a colon or comma token should be implicitly emitted +// before the next token of the specified kind. +// A zero value means no delimiter should be emitted. 
+func (m stateMachine) needDelim(next Kind) (delim byte) {
+	switch {
+	case m.last.needImplicitColon():
+		return ':'
+	case m.last.needImplicitComma(next) && len(m.stack) != 0: // comma not needed for top-level values
+		return ','
+	default:
+		return 0
+	}
+}
+
+// checkDelim reports whether the specified delimiter should be there given
+// the kind of the next token that appears immediately afterwards.
+func (m stateMachine) checkDelim(delim byte, next Kind) error {
+	switch needDelim := m.needDelim(next); {
+	case needDelim == delim:
+		return nil
+	case needDelim == ':':
+		return errMissingColon
+	case needDelim == ',':
+		return errMissingComma
+	default:
+		return newInvalidCharacterError([]byte{delim}, "before next token")
+	}
+}
+
+// invalidateDisabledNamespaces marks all disabled namespaces as invalid.
+//
+// For efficiency, Marshal and Unmarshal may disable namespaces since there are
+// more efficient ways to track duplicate names. However, if an error occurs,
+// the namespaces in Encoder or Decoder will be left in an inconsistent state.
+// Mark the namespaces as invalid so that future method calls on
+// Encoder or Decoder will return an error.
+func (m *stateMachine) invalidateDisabledNamespaces() {
+	for i := 0; i < m.depth(); i++ {
+		e := m.index(i)
+		if !e.isActiveNamespace() {
+			e.invalidateNamespace()
+		}
+	}
+}
+
+// stateEntry encodes several artifacts within a single unsigned integer:
+//   - whether this represents a JSON object or array,
+//   - whether this object should check for duplicate names, and
+//   - how many elements are in this JSON object or array.
+type stateEntry uint64
+
+const (
+	// The type mask (1 bit) records whether this is a JSON object or array.
+	stateTypeMask   stateEntry = 0x8000_0000_0000_0000
+	stateTypeObject stateEntry = 0x8000_0000_0000_0000
+	stateTypeArray  stateEntry = 0x0000_0000_0000_0000
+
+	// The name check mask (2 bits) records whether to update
+	// the namespaces for the current JSON object and
+	// whether the namespace is valid.
+	stateNamespaceMask    stateEntry = 0x6000_0000_0000_0000
+	stateDisableNamespace stateEntry = 0x4000_0000_0000_0000
+	stateInvalidNamespace stateEntry = 0x2000_0000_0000_0000
+
+	// The count mask (61 bits) records the number of elements.
+	stateCountMask    stateEntry = 0x1fff_ffff_ffff_ffff
+	stateCountLSBMask stateEntry = 0x0000_0000_0000_0001
+	stateCountOdd     stateEntry = 0x0000_0000_0000_0001
+	stateCountEven    stateEntry = 0x0000_0000_0000_0000
+)
+
+// length reports the number of elements in the JSON object or array.
+// Each name and value in an object entry is treated as a separate element.
+func (e stateEntry) length() int {
+	return int(e & stateCountMask)
+}
+
+// isObject reports whether this is a JSON object.
+func (e stateEntry) isObject() bool {
+	return e&stateTypeMask == stateTypeObject
+}
+
+// isArray reports whether this is a JSON array.
+func (e stateEntry) isArray() bool {
+	return e&stateTypeMask == stateTypeArray
+}
+
+// needObjectName reports whether the next token must be a JSON string,
+// which is necessary for JSON object names.
+func (e stateEntry) needObjectName() bool {
+	return e&(stateTypeMask|stateCountLSBMask) == stateTypeObject|stateCountEven
+}
+
+// needImplicitColon reports whether a colon should occur next,
+// which always occurs after JSON object names.
+func (e stateEntry) needImplicitColon() bool {
+	return e.needObjectValue()
+}
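Because the type bit and the element count share one word, the grammar checks above compile down to a couple of bit tests. A distilled, runnable model of the same packing (constants renamed for illustration; not the vendored types):

    package main

    import "fmt"

    const (
        typeObject uint64 = 1 << 63   // top bit: object vs. array
        countMask  uint64 = 1<<61 - 1 // low 61 bits: element count
    )

    // needName reports whether the next token must be an object name:
    // within an object, names occupy the even element positions (0, 2, 4, ...).
    func needName(e uint64) bool {
        return e&typeObject != 0 && e&1 == 0
    }

    func main() {
        e := typeObject                       // fresh, empty JSON object
        fmt.Println(needName(e), e&countMask) // true 0: a name must come first
        e++                                   // the name was appended
        fmt.Println(needName(e), e&countMask) // false 1: now a value must follow
    }

+// needObjectValue reports whether the next token must be a JSON value,
+// which is necessary after every JSON object name.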
+func (e stateEntry) needObjectValue() bool {
+	return e&(stateTypeMask|stateCountLSBMask) == stateTypeObject|stateCountOdd
+}
+
+// needImplicitComma reports whether a comma should occur next,
+// which always occurs after a value in a JSON object or array
+// before the next value (or name).
+func (e stateEntry) needImplicitComma(next Kind) bool {
+	return !e.needObjectValue() && e.length() > 0 && next != '}' && next != ']'
+}
+
+// increment increments the number of elements for the current object or array.
+// This assumes that overflow won't practically be an issue since
+// 1<<bits.OnesCount(uint64(stateCountMask)) is sufficiently large.
+func (e *stateEntry) increment() {
+	(*e)++
+}
+
+// decrement decrements the number of elements for the current object or array.
+// It is the caller's responsibility to ensure that e.length > 0.
+func (e *stateEntry) decrement() {
+	(*e)--
+}
+
+// disableNamespace disables the JSON object namespace such that the
+// Encoder or Decoder no longer updates the namespace.
+func (e *stateEntry) disableNamespace() {
+	*e |= stateDisableNamespace
+}
+
+// isActiveNamespace reports whether the JSON object namespace is actively
+// being updated and used for duplicate name checks.
+func (e stateEntry) isActiveNamespace() bool {
+	return e&(stateDisableNamespace) == 0
+}
+
+// invalidateNamespace marks the JSON object namespace as being invalid.
+func (e *stateEntry) invalidateNamespace() {
+	*e |= stateInvalidNamespace
+}
+
+// isValidNamespace reports whether the JSON object namespace is valid.
+func (e stateEntry) isValidNamespace() bool {
+	return e&(stateInvalidNamespace) == 0
+}
+
+// objectNameStack is a stack of names when descending into a JSON object.
+// In contrast to objectNamespaceStack, this only has to remember a single name
+// per JSON object.
+//
+// This data structure may contain offsets to encodeBuffer or decodeBuffer.
+// It violates clean abstraction of layers, but is significantly more efficient.
+// This ensures that popping and pushing in the common case is a trivial
+// push/pop of an offset integer.
+//
+// The zero value is an empty names stack ready for use.
+type objectNameStack struct {
+	// offsets is a stack of offsets for each name.
+	// A non-negative offset is the ending offset into the local names buffer.
+	// A negative offset is the bit-wise inverse of a starting offset into
+	// a remote buffer (e.g., encodeBuffer or decodeBuffer).
+	// A math.MinInt offset at the end implies that the last object is empty.
+	// Invariant: Positive offsets always occur before negative offsets.
+	offsets []int
+	// unquotedNames is a back-to-back concatenation of names.
+	unquotedNames []byte
+}
+
+func (ns *objectNameStack) reset() {
+	ns.offsets = ns.offsets[:0]
+	ns.unquotedNames = ns.unquotedNames[:0]
+	if cap(ns.offsets) > 1<<6 {
+		ns.offsets = nil // avoid pinning arbitrarily large amounts of memory
+	}
+	if cap(ns.unquotedNames) > 1<<10 {
+		ns.unquotedNames = nil // avoid pinning arbitrarily large amounts of memory
+	}
+}
+
+func (ns *objectNameStack) length() int {
+	return len(ns.offsets)
+}
+
+// getUnquoted retrieves the ith unquoted name in the namespace.
+// It returns an empty string if the last object is empty.
+//
+// Invariant: Must call copyQuotedBuffer beforehand.
+func (ns *objectNameStack) getUnquoted(i int) []byte {
+	ns.ensureCopiedBuffer()
+	if i == 0 {
+		return ns.unquotedNames[:ns.offsets[0]]
+	} else {
+		return ns.unquotedNames[ns.offsets[i-1]:ns.offsets[i-0]]
+	}
+}
+
+// invalidOffset indicates that the last JSON object currently has no name.
+const invalidOffset = math.MinInt
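The bit-wise inversion used for remote offsets is its own inverse and can never collide with a valid local offset, since ^i is negative for every i >= 0. A tiny standalone demonstration:

    package main

    import "fmt"

    func main() {
        for _, i := range []int{0, 1, 42} {
            enc := ^i                   // encode: 0 => -1, 1 => -2, 42 => -43
            fmt.Println(enc, ^enc == i) // decode with the same operator
        }
    }

+// push descends into a nested JSON object.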
+func (ns *objectNameStack) push() { + ns.offsets = append(ns.offsets, invalidOffset) +} + +// replaceLastQuotedOffset replaces the last name with the starting offset +// to the quoted name in some remote buffer. All offsets provided must be +// relative to the same buffer until copyQuotedBuffer is called. +func (ns *objectNameStack) replaceLastQuotedOffset(i int) { + // Use bit-wise inversion instead of naive multiplication by -1 to avoid + // ambiguity regarding zero (which is a valid offset into the names field). + // Bit-wise inversion is mathematically equivalent to -i-1, + // such that 0 becomes -1, 1 becomes -2, and so forth. + // This ensures that remote offsets are always negative. + ns.offsets[len(ns.offsets)-1] = ^i +} + +// replaceLastUnquotedName replaces the last name with the provided name. +// +// Invariant: Must call copyQuotedBuffer beforehand. +func (ns *objectNameStack) replaceLastUnquotedName(s string) { + ns.ensureCopiedBuffer() + var startOffset int + if len(ns.offsets) > 1 { + startOffset = ns.offsets[len(ns.offsets)-2] + } + ns.unquotedNames = append(ns.unquotedNames[:startOffset], s...) + ns.offsets[len(ns.offsets)-1] = len(ns.unquotedNames) +} + +// clearLast removes any name in the last JSON object. +// It is semantically equivalent to ns.push followed by ns.pop. +func (ns *objectNameStack) clearLast() { + ns.offsets[len(ns.offsets)-1] = invalidOffset +} + +// pop ascends out of a nested JSON object. +func (ns *objectNameStack) pop() { + ns.offsets = ns.offsets[:len(ns.offsets)-1] +} + +// copyQuotedBuffer copies names from the remote buffer into the local names +// buffer so that there are no more offset references into the remote buffer. +// This allows the remote buffer to change contents without affecting +// the names that this data structure is trying to remember. +func (ns *objectNameStack) copyQuotedBuffer(b []byte) { + // Find the first negative offset. + var i int + for i = len(ns.offsets) - 1; i >= 0 && ns.offsets[i] < 0; i-- { + continue + } + + // Copy each name from the remote buffer into the local buffer. + for i = i + 1; i < len(ns.offsets); i++ { + if i == len(ns.offsets)-1 && ns.offsets[i] == invalidOffset { + if i == 0 { + ns.offsets[i] = 0 + } else { + ns.offsets[i] = ns.offsets[i-1] + } + break // last JSON object had a push without any names + } + + // As a form of Hyrum proofing, we write an invalid character into the + // buffer to make misuse of Decoder.ReadToken more obvious. + // We need to undo that mutation here. + quotedName := b[^ns.offsets[i]:] + if quotedName[0] == invalidateBufferByte { + quotedName[0] = '"' + } + + // Append the unquoted name to the local buffer. + var startOffset int + if i > 0 { + startOffset = ns.offsets[i-1] + } + if n := consumeSimpleString(quotedName); n > 0 { + ns.unquotedNames = append(ns.unquotedNames[:startOffset], quotedName[len(`"`):n-len(`"`)]...) + } else { + ns.unquotedNames, _ = unescapeString(ns.unquotedNames[:startOffset], quotedName) + } + ns.offsets[i] = len(ns.unquotedNames) + } +} + +func (ns *objectNameStack) ensureCopiedBuffer() { + if len(ns.offsets) > 0 && ns.offsets[len(ns.offsets)-1] < 0 { + panic("BUG: copyQuotedBuffer not called beforehand") + } +} + +// objectNamespaceStack is a stack of object namespaces. +// This data structure assists in detecting duplicate names. +type objectNamespaceStack []objectNamespace + +// reset resets the object namespace stack. 
+func (nss *objectNamespaceStack) reset() {
+	if cap(*nss) > 1<<10 {
+		*nss = nil
+	}
+	*nss = (*nss)[:0]
+}
+
+// push starts a new namespace for a nested JSON object.
+func (nss *objectNamespaceStack) push() {
+	if cap(*nss) > len(*nss) {
+		*nss = (*nss)[:len(*nss)+1]
+		nss.last().reset()
+	} else {
+		*nss = append(*nss, objectNamespace{})
+	}
+}
+
+// last returns a pointer to the last JSON object namespace.
+func (nss objectNamespaceStack) last() *objectNamespace {
+	return &nss[len(nss)-1]
+}
+
+// pop terminates the namespace for a nested JSON object.
+func (nss *objectNamespaceStack) pop() {
+	*nss = (*nss)[:len(*nss)-1]
+}
+
+// objectNamespace is the namespace for a JSON object.
+// In contrast to objectNameStack, this needs to remember all names
+// per JSON object.
+//
+// The zero value is an empty namespace ready for use.
+type objectNamespace struct {
+	// It relies on a linear search over all the names before switching
+	// to use a Go map for direct lookup.
+
+	// endOffsets is a list of offsets to the end of each name in buffers.
+	// The length of offsets is the number of names in the namespace.
+	endOffsets []uint
+	// allUnquotedNames is a back-to-back concatenation of every name in the namespace.
+	allUnquotedNames []byte
+	// mapNames is a Go map containing every name in the namespace.
+	// Only valid if non-nil.
+	mapNames map[string]struct{}
+}
+
+// reset resets the namespace to be empty.
+func (ns *objectNamespace) reset() {
+	ns.endOffsets = ns.endOffsets[:0]
+	ns.allUnquotedNames = ns.allUnquotedNames[:0]
+	ns.mapNames = nil
+	if cap(ns.endOffsets) > 1<<6 {
+		ns.endOffsets = nil // avoid pinning arbitrarily large amounts of memory
+	}
+	if cap(ns.allUnquotedNames) > 1<<10 {
+		ns.allUnquotedNames = nil // avoid pinning arbitrarily large amounts of memory
+	}
+}
+
+// length reports the number of names in the namespace.
+func (ns *objectNamespace) length() int {
+	return len(ns.endOffsets)
+}
+
+// getUnquoted retrieves the ith unquoted name in the namespace.
+func (ns *objectNamespace) getUnquoted(i int) []byte {
+	if i == 0 {
+		return ns.allUnquotedNames[:ns.endOffsets[0]]
+	} else {
+		return ns.allUnquotedNames[ns.endOffsets[i-1]:ns.endOffsets[i-0]]
+	}
+}
+
+// lastUnquoted retrieves the last name in the namespace.
+func (ns *objectNamespace) lastUnquoted() []byte {
+	return ns.getUnquoted(ns.length() - 1)
+}
+
+// insertQuoted inserts a name and reports whether it was inserted,
+// which only occurs if name is not already in the namespace.
+// The provided name must be a valid JSON string.
+func (ns *objectNamespace) insertQuoted(name []byte, isVerbatim bool) bool {
+	if isVerbatim {
+		name = name[len(`"`) : len(name)-len(`"`)]
+	}
+	return ns.insert(name, !isVerbatim)
+}
+func (ns *objectNamespace) insertUnquoted(name []byte) bool {
+	return ns.insert(name, false)
+}
+func (ns *objectNamespace) insert(name []byte, quoted bool) bool {
+	var allNames []byte
+	if quoted {
+		allNames, _ = unescapeString(ns.allUnquotedNames, name)
+	} else {
+		allNames = append(ns.allUnquotedNames, name...)
+	}
+	name = allNames[len(ns.allUnquotedNames):]
+
+	// Switch to a map if the buffer is too large for linear search.
+	// This does not add the current name to the map.
+	if ns.mapNames == nil && (ns.length() > 64 || len(ns.allUnquotedNames) > 1024) {
+		ns.mapNames = make(map[string]struct{})
+		var startOffset uint
+		for _, endOffset := range ns.endOffsets {
+			name := ns.allUnquotedNames[startOffset:endOffset]
+			ns.mapNames[string(name)] = struct{}{} // allocates a new string
+			startOffset = endOffset
+		}
+	}
+
+	if ns.mapNames == nil {
+		// Perform linear search over the buffer to find matching names.
+		// It provides O(n) lookup, but does not require any allocations.
+		var startOffset uint
+		for _, endOffset := range ns.endOffsets {
+			if string(ns.allUnquotedNames[startOffset:endOffset]) == string(name) {
+				return false
+			}
+			startOffset = endOffset
+		}
+	} else {
+		// Use the map if it is populated.
+		// It provides O(1) lookup, but requires a string allocation per name.
+		if _, ok := ns.mapNames[string(name)]; ok {
+			return false
+		}
+		ns.mapNames[string(name)] = struct{}{} // allocates a new string
+	}
+
+	ns.allUnquotedNames = allNames
+	ns.endOffsets = append(ns.endOffsets, uint(len(ns.allUnquotedNames)))
+	return true
+}
+
+// removeLast removes the last name in the namespace.
+func (ns *objectNamespace) removeLast() {
+	if ns.mapNames != nil {
+		delete(ns.mapNames, string(ns.lastUnquoted()))
+	}
+	if ns.length()-1 == 0 {
+		ns.endOffsets = ns.endOffsets[:0]
+		ns.allUnquotedNames = ns.allUnquotedNames[:0]
+	} else {
+		ns.endOffsets = ns.endOffsets[:ns.length()-1]
+		ns.allUnquotedNames = ns.allUnquotedNames[:ns.endOffsets[ns.length()-1]]
+	}
+}
+
+type uintSet64 uint64
+
+func (s uintSet64) has(i uint) bool { return s&(1<<i) > 0 }
+func (s *uintSet64) set(i uint)     { *s |= 1 << i }
+
+// uintSet is a set of unsigned integers.
+// It is optimized for most integers being close to zero.
+type uintSet struct {
+	lo uintSet64
+	hi []uintSet64
+}
+
+// has reports whether i is in the set.
+func (s *uintSet) has(i uint) bool {
+	if i < 64 {
+		return s.lo.has(i)
+	} else {
+		i -= 64
+		iHi, iLo := int(i/64), i%64
+		return iHi < len(s.hi) && s.hi[iHi].has(iLo)
+	}
+}
+
+// insert inserts i into the set and reports whether it was the first insertion.
+func (s *uintSet) insert(i uint) bool {
+	// TODO: Make this inlineable at least for the lower 64-bit case.
+	if i < 64 {
+		has := s.lo.has(i)
+		s.lo.set(i)
+		return !has
+	} else {
+		i -= 64
+		iHi, iLo := int(i/64), i%64
+		if iHi >= len(s.hi) {
+			s.hi = append(s.hi, make([]uintSet64, iHi+1-len(s.hi))...)
+			s.hi = s.hi[:cap(s.hi)]
+		}
+		has := s.hi[iHi].has(iLo)
+		s.hi[iHi].set(iLo)
+		return !has
+	}
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go
new file mode 100644
index 000000000..9acba7dad
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go
@@ -0,0 +1,522 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"math"
+	"strconv"
+)
+
+// NOTE: Token is analogous to v1 json.Token.
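The insert method above starts with a linear scan (no per-name allocations) and upgrades to a map only once the namespace is large enough that O(n) probes would dominate. A standalone sketch of the same adaptive strategy, minus the offset-based storage (names here are plain allocated strings; the size threshold mirrors the one above):

    // nameSet reports duplicates with a linear scan while small,
    // then switches to a map once it holds more than 64 names.
    type nameSet struct {
        names []string
        index map[string]struct{}
    }

    // insert reports whether name was newly added.
    func (s *nameSet) insert(name string) bool {
        if s.index == nil && len(s.names) > 64 {
            s.index = make(map[string]struct{}, len(s.names))
            for _, n := range s.names {
                s.index[n] = struct{}{}
            }
        }
        if s.index != nil {
            if _, ok := s.index[name]; ok {
                return false
            }
            s.index[name] = struct{}{}
        } else {
            for _, n := range s.names {
                if n == name {
                    return false
                }
            }
        }
        s.names = append(s.names, name)
        return true
    }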
+ +const ( + maxInt64 = math.MaxInt64 + minInt64 = math.MinInt64 + maxUint64 = math.MaxUint64 + minUint64 = 0 // for consistency and readability purposes + + invalidTokenPanic = "invalid json.Token; it has been voided by a subsequent json.Decoder call" +) + +// Token represents a lexical JSON token, which may be one of the following: +// - a JSON literal (i.e., null, true, or false) +// - a JSON string (e.g., "hello, world!") +// - a JSON number (e.g., 123.456) +// - a start or end delimiter for a JSON object (i.e., { or } ) +// - a start or end delimiter for a JSON array (i.e., [ or ] ) +// +// A Token cannot represent entire array or object values, while a RawValue can. +// There is no Token to represent commas and colons since +// these structural tokens can be inferred from the surrounding context. +type Token struct { + nonComparable + + // Tokens can exist in either a "raw" or an "exact" form. + // Tokens produced by the Decoder are in the "raw" form. + // Tokens returned by constructors are usually in the "exact" form. + // The Encoder accepts Tokens in either the "raw" or "exact" form. + // + // The following chart shows the possible values for each Token type: + // ╔═════════════════╦════════════╤════════════╤════════════╗ + // ║ Token type ║ raw field │ str field │ num field ║ + // ╠═════════════════╬════════════╪════════════╪════════════╣ + // ║ null (raw) ║ "null" │ "" │ 0 ║ + // ║ false (raw) ║ "false" │ "" │ 0 ║ + // ║ true (raw) ║ "true" │ "" │ 0 ║ + // ║ string (raw) ║ non-empty │ "" │ offset ║ + // ║ string (string) ║ nil │ non-empty │ 0 ║ + // ║ number (raw) ║ non-empty │ "" │ offset ║ + // ║ number (float) ║ nil │ "f" │ non-zero ║ + // ║ number (int64) ║ nil │ "i" │ non-zero ║ + // ║ number (uint64) ║ nil │ "u" │ non-zero ║ + // ║ object (delim) ║ "{" or "}" │ "" │ 0 ║ + // ║ array (delim) ║ "[" or "]" │ "" │ 0 ║ + // ╚═════════════════╩════════════╧════════════╧════════════╝ + // + // Notes: + // - For tokens stored in "raw" form, the num field contains the + // absolute offset determined by raw.previousOffsetStart(). + // The buffer itself is stored in raw.previousBuffer(). + // - JSON literals and structural characters are always in the "raw" form. + // - JSON strings and numbers can be in either "raw" or "exact" forms. + // - The exact zero value of JSON strings and numbers in the "exact" forms + // have ambiguous representation. Thus, they are always represented + // in the "raw" form. + + // raw contains a reference to the raw decode buffer. + // If non-nil, then its value takes precedence over str and num. + // It is only valid if num == raw.previousOffsetStart(). + raw *decodeBuffer + + // str is the unescaped JSON string if num is zero. + // Otherwise, it is "f", "i", or "u" if num should be interpreted + // as a float64, int64, or uint64, respectively. + str string + + // num is a float64, int64, or uint64 stored as a uint64 value. + // It is non-zero for any JSON number in the "exact" form. + num uint64 +} + +// TODO: Does representing 1-byte delimiters as *decodeBuffer cause performance issues? 
+ +var ( + Null Token = rawToken("null") + False Token = rawToken("false") + True Token = rawToken("true") + + ObjectStart Token = rawToken("{") + ObjectEnd Token = rawToken("}") + ArrayStart Token = rawToken("[") + ArrayEnd Token = rawToken("]") + + zeroString Token = rawToken(`""`) + zeroNumber Token = rawToken(`0`) + + nanString Token = String("NaN") + pinfString Token = String("Infinity") + ninfString Token = String("-Infinity") +) + +func rawToken(s string) Token { + return Token{raw: &decodeBuffer{buf: []byte(s), prevStart: 0, prevEnd: len(s)}} +} + +// Bool constructs a Token representing a JSON boolean. +func Bool(b bool) Token { + if b { + return True + } + return False +} + +// String constructs a Token representing a JSON string. +// The provided string should contain valid UTF-8, otherwise invalid characters +// may be mangled as the Unicode replacement character. +func String(s string) Token { + if len(s) == 0 { + return zeroString + } + return Token{str: s} +} + +// Float constructs a Token representing a JSON number. +// The values NaN, +Inf, and -Inf will be represented +// as a JSON string with the values "NaN", "Infinity", and "-Infinity". +func Float(n float64) Token { + switch { + case math.Float64bits(n) == 0: + return zeroNumber + case math.IsNaN(n): + return nanString + case math.IsInf(n, +1): + return pinfString + case math.IsInf(n, -1): + return ninfString + } + return Token{str: "f", num: math.Float64bits(n)} +} + +// Int constructs a Token representing a JSON number from an int64. +func Int(n int64) Token { + if n == 0 { + return zeroNumber + } + return Token{str: "i", num: uint64(n)} +} + +// Uint constructs a Token representing a JSON number from a uint64. +func Uint(n uint64) Token { + if n == 0 { + return zeroNumber + } + return Token{str: "u", num: uint64(n)} +} + +// Clone makes a copy of the Token such that its value remains valid +// even after a subsequent Decoder.Read call. +func (t Token) Clone() Token { + // TODO: Allow caller to avoid any allocations? + if raw := t.raw; raw != nil { + // Avoid copying globals. + if t.raw.prevStart == 0 { + switch t.raw { + case Null.raw: + return Null + case False.raw: + return False + case True.raw: + return True + case ObjectStart.raw: + return ObjectStart + case ObjectEnd.raw: + return ObjectEnd + case ArrayStart.raw: + return ArrayStart + case ArrayEnd.raw: + return ArrayEnd + } + } + + if uint64(raw.previousOffsetStart()) != t.num { + panic(invalidTokenPanic) + } + // TODO(https://go.dev/issue/45038): Use bytes.Clone. + buf := append([]byte(nil), raw.previousBuffer()...) + return Token{raw: &decodeBuffer{buf: buf, prevStart: 0, prevEnd: len(buf)}} + } + return t +} + +// Bool returns the value for a JSON boolean. +// It panics if the token kind is not a JSON boolean. +func (t Token) Bool() bool { + switch t.raw { + case True.raw: + return true + case False.raw: + return false + default: + panic("invalid JSON token kind: " + t.Kind().String()) + } +} + +// appendString appends a JSON string to dst and returns it. +// It panics if t is not a JSON string. +func (t Token) appendString(dst []byte, validateUTF8, preserveRaw bool, escapeRune func(rune) bool) ([]byte, error) { + if raw := t.raw; raw != nil { + // Handle raw string value. 
+ buf := raw.previousBuffer() + if Kind(buf[0]) == '"' { + if escapeRune == nil && consumeSimpleString(buf) == len(buf) { + return append(dst, buf...), nil + } + dst, _, err := reformatString(dst, buf, validateUTF8, preserveRaw, escapeRune) + return dst, err + } + } else if len(t.str) != 0 && t.num == 0 { + // Handle exact string value. + return appendString(dst, t.str, validateUTF8, escapeRune) + } + + panic("invalid JSON token kind: " + t.Kind().String()) +} + +// String returns the unescaped string value for a JSON string. +// For other JSON kinds, this returns the raw JSON representation. +func (t Token) String() string { + // This is inlinable to take advantage of "function outlining". + // This avoids an allocation for the string(b) conversion + // if the caller does not use the string in an escaping manner. + // See https://blog.filippo.io/efficient-go-apis-with-the-inliner/ + s, b := t.string() + if len(b) > 0 { + return string(b) + } + return s +} +func (t Token) string() (string, []byte) { + if raw := t.raw; raw != nil { + if uint64(raw.previousOffsetStart()) != t.num { + panic(invalidTokenPanic) + } + buf := raw.previousBuffer() + if buf[0] == '"' { + // TODO: Preserve valueFlags in Token? + isVerbatim := consumeSimpleString(buf) == len(buf) + return "", unescapeStringMayCopy(buf, isVerbatim) + } + // Handle tokens that are not JSON strings for fmt.Stringer. + return "", buf + } + if len(t.str) != 0 && t.num == 0 { + return t.str, nil + } + // Handle tokens that are not JSON strings for fmt.Stringer. + if t.num > 0 { + switch t.str[0] { + case 'f': + return string(appendNumber(nil, math.Float64frombits(t.num), 64)), nil + case 'i': + return strconv.FormatInt(int64(t.num), 10), nil + case 'u': + return strconv.FormatUint(uint64(t.num), 10), nil + } + } + return "", nil +} + +// appendNumber appends a JSON number to dst and returns it. +// It panics if t is not a JSON number. +func (t Token) appendNumber(dst []byte, canonicalize bool) ([]byte, error) { + if raw := t.raw; raw != nil { + // Handle raw number value. + buf := raw.previousBuffer() + if Kind(buf[0]).normalize() == '0' { + if !canonicalize { + return append(dst, buf...), nil + } + dst, _, err := reformatNumber(dst, buf, canonicalize) + return dst, err + } + } else if t.num != 0 { + // Handle exact number value. + switch t.str[0] { + case 'f': + return appendNumber(dst, math.Float64frombits(t.num), 64), nil + case 'i': + return strconv.AppendInt(dst, int64(t.num), 10), nil + case 'u': + return strconv.AppendUint(dst, uint64(t.num), 10), nil + } + } + + panic("invalid JSON token kind: " + t.Kind().String()) +} + +// Float returns the floating-point value for a JSON number. +// It returns a NaN, +Inf, or -Inf value for any JSON string +// with the values "NaN", "Infinity", or "-Infinity". +// It panics for all other cases. +func (t Token) Float() float64 { + if raw := t.raw; raw != nil { + // Handle raw number value. + if uint64(raw.previousOffsetStart()) != t.num { + panic(invalidTokenPanic) + } + buf := raw.previousBuffer() + if Kind(buf[0]).normalize() == '0' { + fv, _ := parseFloat(buf, 64) + return fv + } + } else if t.num != 0 { + // Handle exact number value. + switch t.str[0] { + case 'f': + return math.Float64frombits(t.num) + case 'i': + return float64(int64(t.num)) + case 'u': + return float64(uint64(t.num)) + } + } + + // Handle string values with "NaN", "Infinity", or "-Infinity". 
+	if t.Kind() == '"' {
+		switch t.String() {
+		case "NaN":
+			return math.NaN()
+		case "Infinity":
+			return math.Inf(+1)
+		case "-Infinity":
+			return math.Inf(-1)
+		}
+	}
+
+	panic("invalid JSON token kind: " + t.Kind().String())
+}
+
+// Int returns the signed integer value for a JSON number.
+// The fractional component of any number is ignored (truncation toward zero).
+// Any number beyond the representation of an int64 will be saturated
+// to the closest representable value.
+// It panics if the token kind is not a JSON number.
+func (t Token) Int() int64 {
+	if raw := t.raw; raw != nil {
+		// Handle raw integer value.
+		if uint64(raw.previousOffsetStart()) != t.num {
+			panic(invalidTokenPanic)
+		}
+		neg := false
+		buf := raw.previousBuffer()
+		if len(buf) > 0 && buf[0] == '-' {
+			neg, buf = true, buf[1:]
+		}
+		if numAbs, ok := parseDecUint(buf); ok {
+			if neg {
+				if numAbs > -minInt64 {
+					return minInt64
+				}
+				return -1 * int64(numAbs)
+			} else {
+				if numAbs > +maxInt64 {
+					return maxInt64
+				}
+				return +1 * int64(numAbs)
+			}
+		}
+	} else if t.num != 0 {
+		// Handle exact integer value.
+		switch t.str[0] {
+		case 'i':
+			return int64(t.num)
+		case 'u':
+			if t.num > maxInt64 {
+				return maxInt64
+			}
+			return int64(t.num)
+		}
+	}
+
+	// Handle JSON number that is a floating-point value.
+	if t.Kind() == '0' {
+		switch fv := t.Float(); {
+		case fv >= maxInt64:
+			return maxInt64
+		case fv <= minInt64:
+			return minInt64
+		default:
+			return int64(fv) // truncation toward zero
+		}
+	}
+
+	panic("invalid JSON token kind: " + t.Kind().String())
+}
+
+// Uint returns the unsigned integer value for a JSON number.
+// The fractional component of any number is ignored (truncation toward zero).
+// Any number beyond the representation of a uint64 will be saturated
+// to the closest representable value.
+// It panics if the token kind is not a JSON number.
+func (t Token) Uint() uint64 {
+	// NOTE: This accessor returns 0 for any negative JSON number,
+	// which might be surprising, but is at least consistent with the behavior
+	// of saturating out-of-bounds numbers to the closest representable number.
+
+	if raw := t.raw; raw != nil {
+		// Handle raw integer value.
+		if uint64(raw.previousOffsetStart()) != t.num {
+			panic(invalidTokenPanic)
+		}
+		neg := false
+		buf := raw.previousBuffer()
+		if len(buf) > 0 && buf[0] == '-' {
+			neg, buf = true, buf[1:]
+		}
+		if num, ok := parseDecUint(buf); ok {
+			if neg {
+				return minUint64
+			}
+			return num
+		}
+	} else if t.num != 0 {
+		// Handle exact integer value.
+		switch t.str[0] {
+		case 'u':
+			return t.num
+		case 'i':
+			if int64(t.num) < minUint64 {
+				return minUint64
+			}
+			return uint64(int64(t.num))
+		}
+	}
+
+	// Handle JSON number that is a floating-point value.
+	if t.Kind() == '0' {
+		switch fv := t.Float(); {
+		case fv >= maxUint64:
+			return maxUint64
+		case fv <= minUint64:
+			return minUint64
+		default:
+			return uint64(fv) // truncation toward zero
+		}
+	}
+
+	panic("invalid JSON token kind: " + t.Kind().String())
+}
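Illustrative use of the saturating accessors (the package is vendored as an internal dependency, so this cannot actually be imported from outside; it is shown only to make the semantics concrete):

    t := json.Float(1e300)
    fmt.Println(t.Int())  // 9223372036854775807 (saturates at math.MaxInt64)
    fmt.Println(t.Uint()) // 18446744073709551615 (saturates at math.MaxUint64)

    n := json.Int(-1)
    fmt.Println(n.Uint()) // 0: negative numbers clip to zero

+// Kind returns the token kind.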
+func (t Token) Kind() Kind {
+	switch {
+	case t.raw != nil:
+		raw := t.raw
+		if uint64(raw.previousOffsetStart()) != t.num {
+			panic(invalidTokenPanic)
+		}
+		return Kind(t.raw.buf[raw.prevStart]).normalize()
+	case t.num != 0:
+		return '0'
+	case len(t.str) != 0:
+		return '"'
+	default:
+		return invalidKind
+	}
+}
+
+// Kind represents each possible JSON token kind with a single byte,
+// which is conveniently the first byte of that kind's grammar
+// with the restriction that numbers always be represented with '0':
+//
+//   - 'n': null
+//   - 'f': false
+//   - 't': true
+//   - '"': string
+//   - '0': number
+//   - '{': object start
+//   - '}': object end
+//   - '[': array start
+//   - ']': array end
+//
+// An invalid kind is usually represented using 0,
+// but may be non-zero due to invalid JSON data.
+type Kind byte
+
+const invalidKind Kind = 0
+
+// String prints the kind in a human-readable fashion.
+func (k Kind) String() string {
+	switch k {
+	case 'n':
+		return "null"
+	case 'f':
+		return "false"
+	case 't':
+		return "true"
+	case '"':
+		return "string"
+	case '0':
+		return "number"
+	case '{':
+		return "{"
+	case '}':
+		return "}"
+	case '[':
+		return "["
+	case ']':
+		return "]"
+	default:
+		return "<invalid json.Kind: " + quoteRune([]byte{byte(k)}) + ">"
+	}
+}
+
+// normalize coalesces all possible starting characters of a number as just '0'.
+func (k Kind) normalize() Kind {
+	if k == '-' || ('0' <= k && k <= '9') {
+		return '0'
+	}
+	return k
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go
new file mode 100644
index 000000000..e0bd1b31d
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go
@@ -0,0 +1,381 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"sort"
+	"sync"
+	"unicode/utf16"
+	"unicode/utf8"
+)
+
+// NOTE: RawValue is analogous to v1 json.RawMessage.
+
+// RawValue represents a single raw JSON value, which may be one of the following:
+//   - a JSON literal (i.e., null, true, or false)
+//   - a JSON string (e.g., "hello, world!")
+//   - a JSON number (e.g., 123.456)
+//   - an entire JSON object (e.g., {"fizz":"buzz"} )
+//   - an entire JSON array (e.g., [1,2,3] )
+//
+// RawValue can represent entire array or object values, while Token cannot.
+// RawValue may contain leading and/or trailing whitespace.
+type RawValue []byte
+
+// Clone returns a copy of v.
+func (v RawValue) Clone() RawValue {
+	if v == nil {
+		return nil
+	}
+	return append(RawValue{}, v...)
+}
+
+// String returns the string formatting of v.
+func (v RawValue) String() string {
+	if v == nil {
+		return "null"
+	}
+	return string(v)
+}
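As with Token above, the RawValue API is internal here, so the following fragment is illustrative only; it shows IsValid and Canonicalize (both defined below) applied to a small document:

    v := json.RawValue(`{"b": 2, "a": 1}`)
    fmt.Println(v.IsValid()) // true
    if err := v.Canonicalize(); err == nil {
        fmt.Println(v.String()) // {"a":1,"b":2} per RFC 8785
    }

+// IsValid reports whether the raw JSON value is syntactically valid
+// according to RFC 7493.
+//
+// It verifies whether the input is properly encoded as UTF-8,
+// that escape sequences within strings decode to valid Unicode codepoints, and
+// that all names in each object are unique.
+// It does not verify whether numbers are representable within the limits
+// of any common numeric type (e.g., float64, int64, or uint64).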
+func (v RawValue) IsValid() bool { + d := getBufferedDecoder(v, DecodeOptions{}) + defer putBufferedDecoder(d) + _, errVal := d.ReadValue() + _, errEOF := d.ReadToken() + return errVal == nil && errEOF == io.EOF +} + +// Compact removes all whitespace from the raw JSON value. +// +// It does not reformat JSON strings to use any other representation. +// It is guaranteed to succeed if the input is valid. +// If the value is already compacted, then the buffer is not mutated. +func (v *RawValue) Compact() error { + return v.reformat(false, false, "", "") +} + +// Indent reformats the whitespace in the raw JSON value so that each element +// in a JSON object or array begins on a new, indented line beginning with +// prefix followed by one or more copies of indent according to the nesting. +// The value does not begin with the prefix nor any indention, +// to make it easier to embed inside other formatted JSON data. +// +// It does not reformat JSON strings to use any other representation. +// It is guaranteed to succeed if the input is valid. +// If the value is already indented properly, then the buffer is not mutated. +func (v *RawValue) Indent(prefix, indent string) error { + return v.reformat(false, true, prefix, indent) +} + +// Canonicalize canonicalizes the raw JSON value according to the +// JSON Canonicalization Scheme (JCS) as defined by RFC 8785 +// where it produces a stable representation of a JSON value. +// +// The output stability is dependent on the stability of the application data +// (see RFC 8785, Appendix E). It cannot produce stable output from +// fundamentally unstable input. For example, if the JSON value +// contains ephemeral data (e.g., a frequently changing timestamp), +// then the value is still unstable regardless of whether this is called. +// +// Note that JCS treats all JSON numbers as IEEE 754 double precision numbers. +// Any numbers with precision beyond what is representable by that form +// will lose their precision when canonicalized. For example, integer values +// beyond ±2⁵³ will lose their precision. It is recommended that +// int64 and uint64 data types be represented as a JSON string. +// +// It is guaranteed to succeed if the input is valid. +// If the value is already canonicalized, then the buffer is not mutated. +func (v *RawValue) Canonicalize() error { + return v.reformat(true, false, "", "") +} + +// TODO: Instead of implementing the v1 Marshaler/Unmarshaler, +// consider implementing the v2 versions instead. + +// MarshalJSON returns v as the JSON encoding of v. +// It returns the stored value as the raw JSON output without any validation. +// If v is nil, then this returns a JSON null. +func (v RawValue) MarshalJSON() ([]byte, error) { + // NOTE: This matches the behavior of v1 json.RawMessage.MarshalJSON. + if v == nil { + return []byte("null"), nil + } + return v, nil +} + +// UnmarshalJSON sets v as the JSON encoding of b. +// It stores a copy of the provided raw JSON input without any validation. +func (v *RawValue) UnmarshalJSON(b []byte) error { + // NOTE: This matches the behavior of v1 json.RawMessage.UnmarshalJSON. + if v == nil { + return errors.New("json.RawValue: UnmarshalJSON on nil pointer") + } + *v = append((*v)[:0], b...) + return nil +} + +// Kind returns the starting token kind. +// For a valid value, this will never include '}' or ']'. 
+func (v RawValue) Kind() Kind { + if v := v[consumeWhitespace(v):]; len(v) > 0 { + return Kind(v[0]).normalize() + } + return invalidKind +} + +func (v *RawValue) reformat(canonical, multiline bool, prefix, indent string) error { + var eo EncodeOptions + if canonical { + eo.AllowInvalidUTF8 = false // per RFC 8785, section 3.2.4 + eo.AllowDuplicateNames = false // per RFC 8785, section 3.1 + eo.canonicalizeNumbers = true // per RFC 8785, section 3.2.2.3 + eo.EscapeRune = nil // per RFC 8785, section 3.2.2.2 + eo.multiline = false // per RFC 8785, section 3.2.1 + } else { + if s := trimLeftSpaceTab(prefix); len(s) > 0 { + panic("json: invalid character " + quoteRune([]byte(s)) + " in indent prefix") + } + if s := trimLeftSpaceTab(indent); len(s) > 0 { + panic("json: invalid character " + quoteRune([]byte(s)) + " in indent") + } + eo.AllowInvalidUTF8 = true + eo.AllowDuplicateNames = true + eo.preserveRawStrings = true + eo.multiline = multiline // in case indent is empty + eo.IndentPrefix = prefix + eo.Indent = indent + } + eo.omitTopLevelNewline = true + + // Write the entire value to reformat all tokens and whitespace. + e := getBufferedEncoder(eo) + defer putBufferedEncoder(e) + if err := e.WriteValue(*v); err != nil { + return err + } + + // For canonical output, we may need to reorder object members. + if canonical { + // Obtain a buffered encoder just to use its internal buffer as + // a scratch buffer in reorderObjects for reordering object members. + e2 := getBufferedEncoder(EncodeOptions{}) + defer putBufferedEncoder(e2) + + // Disable redundant checks performed earlier during encoding. + d := getBufferedDecoder(e.buf, DecodeOptions{AllowInvalidUTF8: true, AllowDuplicateNames: true}) + defer putBufferedDecoder(d) + reorderObjects(d, &e2.buf) // per RFC 8785, section 3.2.3 + } + + // Store the result back into the value if different. + if !bytes.Equal(*v, e.buf) { + *v = append((*v)[:0], e.buf...) + } + return nil +} + +func trimLeftSpaceTab(s string) string { + for i, r := range s { + switch r { + case ' ', '\t': + default: + return s[i:] + } + } + return "" +} + +type memberName struct { + // name is the unescaped name. + name []byte + // before and after are byte offsets into Decoder.buf that represents + // the entire name/value pair. It may contain leading commas. + before, after int64 +} + +var memberNamePool = sync.Pool{New: func() any { return new(memberNames) }} + +func getMemberNames() *memberNames { + ns := memberNamePool.Get().(*memberNames) + *ns = (*ns)[:0] + return ns +} +func putMemberNames(ns *memberNames) { + if cap(*ns) < 1<<10 { + for i := range *ns { + (*ns)[i] = memberName{} // avoid pinning name + } + memberNamePool.Put(ns) + } +} + +type memberNames []memberName + +func (m *memberNames) Len() int { return len(*m) } +func (m *memberNames) Less(i, j int) bool { return lessUTF16((*m)[i].name, (*m)[j].name) } +func (m *memberNames) Swap(i, j int) { (*m)[i], (*m)[j] = (*m)[j], (*m)[i] } + +// reorderObjects recursively reorders all object members in place +// according to the ordering specified in RFC 8785, section 3.2.3. +// +// Pre-conditions: +// - The value is valid (i.e., no decoder errors should ever occur). +// - The value is compact (i.e., no whitespace is present). +// - Initial call is provided a Decoder reading from the start of v. +// +// Post-conditions: +// - Exactly one JSON value is read from the Decoder. +// - All fully-parsed JSON objects are reordered by directly moving +// the members in the value buffer. 
+// +// The runtime is approximately O(n·log(n)) + O(m·log(m)), +// where n is len(v) and m is the total number of object members. +func reorderObjects(d *Decoder, scratch *[]byte) { + switch tok, _ := d.ReadToken(); tok.Kind() { + case '{': + // Iterate and collect the name and offsets for every object member. + members := getMemberNames() + defer putMemberNames(members) + var prevName []byte + isSorted := true + + beforeBody := d.InputOffset() // offset after '{' + for d.PeekKind() != '}' { + beforeName := d.InputOffset() + var flags valueFlags + name, _ := d.readValue(&flags) + name = unescapeStringMayCopy(name, flags.isVerbatim()) + reorderObjects(d, scratch) + afterValue := d.InputOffset() + + if isSorted && len(*members) > 0 { + isSorted = lessUTF16(prevName, []byte(name)) + } + *members = append(*members, memberName{name, beforeName, afterValue}) + prevName = name + } + afterBody := d.InputOffset() // offset before '}' + d.ReadToken() + + // Sort the members; return early if it's already sorted. + if isSorted { + return + } + // TODO(https://go.dev/issue/47619): Use slices.Sort. + sort.Sort(members) + + // Append the reordered members to a new buffer, + // then copy the reordered members back over the original members. + // Avoid swapping in place since each member may be a different size + // where moving a member over a smaller member may corrupt the data + // for subsequent members before they have been moved. + // + // The following invariant must hold: + // sum([m.after-m.before for m in members]) == afterBody-beforeBody + sorted := (*scratch)[:0] + for i, member := range *members { + if d.buf[member.before] == ',' { + member.before++ // trim leading comma + } + sorted = append(sorted, d.buf[member.before:member.after]...) + if i < len(*members)-1 { + sorted = append(sorted, ',') // append trailing comma + } + } + if int(afterBody-beforeBody) != len(sorted) { + panic("BUG: length invariant violated") + } + copy(d.buf[beforeBody:afterBody], sorted) + + // Update scratch buffer to the largest amount ever used. + if len(sorted) > len(*scratch) { + *scratch = sorted + } + case '[': + for d.PeekKind() != ']' { + reorderObjects(d, scratch) + } + d.ReadToken() + } +} + +// lessUTF16 reports whether x is lexicographically less than y according +// to the UTF-16 codepoints of the UTF-8 encoded input strings. +// This implements the ordering specified in RFC 8785, section 3.2.3. +// The inputs must be valid UTF-8, otherwise this may panic. +func lessUTF16[Bytes []byte | string](x, y Bytes) bool { + // NOTE: This is an optimized, allocation-free implementation + // of lessUTF16Simple in fuzz_test.go. FuzzLessUTF16 verifies that the + // two implementations agree on the result of comparing any two strings. + + isUTF16Self := func(r rune) bool { + return ('\u0000' <= r && r <= '\uD7FF') || ('\uE000' <= r && r <= '\uFFFF') + } + + var invalidUTF8 bool + x0, y0 := x, y + for { + if len(x) == 0 || len(y) == 0 { + if len(x) == len(y) && invalidUTF8 { + return string(x0) < string(y0) + } + return len(x) < len(y) + } + + // ASCII fast-path. + if x[0] < utf8.RuneSelf || y[0] < utf8.RuneSelf { + if x[0] != y[0] { + return x[0] < y[0] + } + x, y = x[1:], y[1:] + continue + } + + // Decode next pair of runes as UTF-8. + // TODO(https://go.dev/issue/56948): Use a generic implementation + // of utf8.DecodeRune, or rely on a compiler optimization to statically + // hide the cost of a type switch (https://go.dev/issue/57072). 
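+	// Worked example (sketch): lessUTF16("\uFFFD", "\U0001F600") reports
+	// false, because U+1F600 compares as its UTF-16 surrogate pair
+	// (0xD83D, 0xDE00) and 0xFFFD > 0xD83D; plain code-point (UTF-8)
+	// order would put U+FFFD first.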
+ var rx, ry rune + var nx, ny int + switch any(x).(type) { + case string: + rx, nx = utf8.DecodeRuneInString(string(x)) + ry, ny = utf8.DecodeRuneInString(string(y)) + case []byte: + rx, nx = utf8.DecodeRune([]byte(x)) + ry, ny = utf8.DecodeRune([]byte(y)) + } + + selfx := isUTF16Self(rx) + selfy := isUTF16Self(ry) + switch { + // The x rune is a single UTF-16 codepoint, while + // the y rune is a surrogate pair of UTF-16 codepoints. + case selfx && !selfy: + ry, _ = utf16.EncodeRune(ry) + // The y rune is a single UTF-16 codepoint, while + // the x rune is a surrogate pair of UTF-16 codepoints. + case selfy && !selfx: + rx, _ = utf16.EncodeRune(rx) + } + if rx != ry { + return rx < ry + } + invalidUTF8 = invalidUTF8 || (rx == utf8.RuneError && nx == 1) || (ry == utf8.RuneError && ny == 1) + x, y = x[nx:], y[ny:] + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go new file mode 100644 index 000000000..61141a500 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go @@ -0,0 +1,260 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schemaconv + +import ( + "errors" + "path" + "strings" + + "k8s.io/kube-openapi/pkg/validation/spec" + "sigs.k8s.io/structured-merge-diff/v4/schema" +) + +// ToSchemaFromOpenAPI converts a directory of OpenAPI schemas to an smd Schema. +// - models: a map from definition name to OpenAPI V3 structural schema for each definition. +// Key in map is used to resolve references in the schema. +// - preserveUnknownFields: flag indicating whether unknown fields in all schemas should be preserved. +// - returns: nil and an error if there is a parse error, or if schema does not satisfy a +// required structural schema invariant for conversion. If no error, returns +// a new smd schema. +// +// Schema should be validated as structural before using with this function, or +// there may be information lost. +func ToSchemaFromOpenAPI(models map[string]*spec.Schema, preserveUnknownFields bool) (*schema.Schema, error) { + c := convert{ + preserveUnknownFields: preserveUnknownFields, + output: &schema.Schema{}, + } + + for name, spec := range models { + // Skip/Ignore top-level references + if len(spec.Ref.String()) > 0 { + continue + } + + var a schema.Atom + + // Hard-coded schemas for now as proto_models implementation functions. 
+ // https://github.com/kubernetes/kube-openapi/issues/364 + if name == quantityResource { + a = schema.Atom{ + Scalar: untypedDef.Atom.Scalar, + } + } else if name == rawExtensionResource { + a = untypedDef.Atom + } else { + c2 := c.push(name, &a) + c2.visitSpec(spec) + c.pop(c2) + } + + c.insertTypeDef(name, a) + } + + if len(c.errorMessages) > 0 { + return nil, errors.New(strings.Join(c.errorMessages, "\n")) + } + + c.addCommonTypes() + return c.output, nil +} + +func (c *convert) visitSpec(m *spec.Schema) { + // Check if this schema opts its descendants into preserve-unknown-fields + if p, ok := m.Extensions["x-kubernetes-preserve-unknown-fields"]; ok && p == true { + c.preserveUnknownFields = true + } + a := c.top() + *a = c.parseSchema(m) +} + +func (c *convert) parseSchema(m *spec.Schema) schema.Atom { + // k8s-generated OpenAPI specs have historically used only one value for + // type and starting with OpenAPIV3 it is only allowed to be + // a single string. + typ := "" + if len(m.Type) > 0 { + typ = m.Type[0] + } + + // Structural Schemas produced by kubernetes follow very specific rules which + // we can use to infer the SMD type: + switch typ { + case "": + // According to Swagger docs: + // https://swagger.io/docs/specification/data-models/data-types/#any + // + // If no type is specified, it is equivalent to accepting any type. + return schema.Atom{ + Scalar: ptr(schema.Scalar("untyped")), + List: c.parseList(m), + Map: c.parseObject(m), + } + + case "object": + return schema.Atom{ + Map: c.parseObject(m), + } + case "array": + return schema.Atom{ + List: c.parseList(m), + } + case "integer", "boolean", "number", "string": + return convertPrimitive(typ, m.Format) + default: + c.reportError("unrecognized type: '%v'", typ) + return schema.Atom{ + Scalar: ptr(schema.Scalar("untyped")), + } + } +} + +func (c *convert) makeOpenAPIRef(specSchema *spec.Schema) schema.TypeRef { + refString := specSchema.Ref.String() + + // Special-case handling for $ref stored inside a single-element allOf + if len(refString) == 0 && len(specSchema.AllOf) == 1 && len(specSchema.AllOf[0].Ref.String()) > 0 { + refString = specSchema.AllOf[0].Ref.String() + } + + if _, n := path.Split(refString); len(n) > 0 { + //!TODO: Refactor the field ElementRelationship override + // we can generate the types with overrides ahead of time rather than + // requiring the hacky runtime support + // (could just create a normalized key struct containing all customizations + // to deduplicate) + mapRelationship, err := getMapElementRelationship(specSchema.Extensions) + if err != nil { + c.reportError(err.Error()) + } + + if len(mapRelationship) > 0 { + return schema.TypeRef{ + NamedType: &n, + ElementRelationship: &mapRelationship, + } + } + + return schema.TypeRef{ + NamedType: &n, + } + + } + var inlined schema.Atom + + // compute the type inline + c2 := c.push("inlined in "+c.currentName, &inlined) + c2.preserveUnknownFields = c.preserveUnknownFields + c2.visitSpec(specSchema) + c.pop(c2) + + return schema.TypeRef{ + Inlined: inlined, + } +} + +func (c *convert) parseObject(s *spec.Schema) *schema.Map { + var fields []schema.StructField + for name, member := range s.Properties { + fields = append(fields, schema.StructField{ + Name: name, + Type: c.makeOpenAPIRef(&member), + Default: member.Default, + }) + } + + // AdditionalProperties informs the schema of any "unknown" keys + // Unknown keys are enforced by the ElementType field. 
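+	// Decision sketch for the closure below (a reading of its branches):
+	//   additionalProperties absent, preserveUnknownFields or no properties -> deduced
+	//   additionalProperties absent, properties declared                    -> unknowns rejected
+	//   additionalProperties is a schema                                    -> that schema
+	//   additionalProperties: true                                          -> deduced
+	//   additionalProperties: false                                         -> unknowns rejected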
+ elementType := func() schema.TypeRef { + if s.AdditionalProperties == nil { + // According to openAPI spec, an object without properties and without + // additionalProperties is assumed to be a free-form object. + if c.preserveUnknownFields || len(s.Properties) == 0 { + return schema.TypeRef{ + NamedType: &deducedName, + } + } + + // If properties are specified, do not implicitly allow unknown + // fields + return schema.TypeRef{} + } else if s.AdditionalProperties.Schema != nil { + // Unknown fields use the referred schema + return c.makeOpenAPIRef(s.AdditionalProperties.Schema) + + } else if s.AdditionalProperties.Allows { + // A boolean instead of a schema was provided. Deduce the + // type from the value provided at runtime. + return schema.TypeRef{ + NamedType: &deducedName, + } + } else { + // Additional Properties are explicitly disallowed by the user. + // Ensure element type is empty. + return schema.TypeRef{} + } + }() + + relationship, err := getMapElementRelationship(s.Extensions) + if err != nil { + c.reportError(err.Error()) + } + + return &schema.Map{ + Fields: fields, + ElementRelationship: relationship, + ElementType: elementType, + } +} + +func (c *convert) parseList(s *spec.Schema) *schema.List { + relationship, mapKeys, err := getListElementRelationship(s.Extensions) + if err != nil { + c.reportError(err.Error()) + } + elementType := func() schema.TypeRef { + if s.Items != nil { + if s.Items.Schema == nil || s.Items.Len() != 1 { + c.reportError("structural schema arrays must have exactly one member subtype") + return schema.TypeRef{ + NamedType: &deducedName, + } + } + + subSchema := s.Items.Schema + if subSchema == nil { + subSchema = &s.Items.Schemas[0] + } + return c.makeOpenAPIRef(subSchema) + } else if len(s.Type) > 0 && len(s.Type[0]) > 0 { + c.reportError("`items` must be specified on arrays") + } + + // A list with no items specified is treated as "untyped". + return schema.TypeRef{ + NamedType: &untypedName, + } + + }() + + return &schema.List{ + ElementRelationship: relationship, + Keys: mapKeys, + ElementType: elementType, + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go new file mode 100644 index 000000000..2c6fd76a9 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go @@ -0,0 +1,178 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schemaconv + +import ( + "errors" + "path" + "strings" + + "k8s.io/kube-openapi/pkg/util/proto" + "sigs.k8s.io/structured-merge-diff/v4/schema" +) + +// ToSchema converts openapi definitions into a schema suitable for structured +// merge (i.e. kubectl apply v2). +func ToSchema(models proto.Models) (*schema.Schema, error) { + return ToSchemaWithPreserveUnknownFields(models, false) +} + +// ToSchemaWithPreserveUnknownFields converts openapi definitions into a schema suitable for structured +// merge (i.e. kubectl apply v2), it will preserve unknown fields if specified. 
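+//
+// A minimal usage sketch, assuming a parsed openapi_v2.Document in doc
+// (proto.NewOpenAPIData is from k8s.io/kube-openapi/pkg/util/proto):
+//
+//	models, err := proto.NewOpenAPIData(doc)
+//	if err != nil {
+//		return nil, err
+//	}
+//	return ToSchemaWithPreserveUnknownFields(models, true)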
+func ToSchemaWithPreserveUnknownFields(models proto.Models, preserveUnknownFields bool) (*schema.Schema, error) { + c := convert{ + preserveUnknownFields: preserveUnknownFields, + output: &schema.Schema{}, + } + for _, name := range models.ListModels() { + model := models.LookupModel(name) + + var a schema.Atom + c2 := c.push(name, &a) + model.Accept(c2) + c.pop(c2) + + c.insertTypeDef(name, a) + } + + if len(c.errorMessages) > 0 { + return nil, errors.New(strings.Join(c.errorMessages, "\n")) + } + + c.addCommonTypes() + return c.output, nil +} + +func (c *convert) makeRef(model proto.Schema, preserveUnknownFields bool) schema.TypeRef { + var tr schema.TypeRef + if r, ok := model.(*proto.Ref); ok { + if r.Reference() == "io.k8s.apimachinery.pkg.runtime.RawExtension" { + return schema.TypeRef{ + NamedType: &untypedName, + } + } + // reference a named type + _, n := path.Split(r.Reference()) + tr.NamedType = &n + + mapRelationship, err := getMapElementRelationship(model.GetExtensions()) + + if err != nil { + c.reportError(err.Error()) + } + + // empty string means unset. + if len(mapRelationship) > 0 { + tr.ElementRelationship = &mapRelationship + } + } else { + // compute the type inline + c2 := c.push("inlined in "+c.currentName, &tr.Inlined) + c2.preserveUnknownFields = preserveUnknownFields + model.Accept(c2) + c.pop(c2) + + if tr == (schema.TypeRef{}) { + // emit warning? + tr.NamedType = &untypedName + } + } + return tr +} + +func (c *convert) VisitKind(k *proto.Kind) { + preserveUnknownFields := c.preserveUnknownFields + if p, ok := k.GetExtensions()["x-kubernetes-preserve-unknown-fields"]; ok && p == true { + preserveUnknownFields = true + } + + a := c.top() + a.Map = &schema.Map{} + for _, name := range k.FieldOrder { + member := k.Fields[name] + tr := c.makeRef(member, preserveUnknownFields) + a.Map.Fields = append(a.Map.Fields, schema.StructField{ + Name: name, + Type: tr, + Default: member.GetDefault(), + }) + } + + unions, err := makeUnions(k.GetExtensions()) + if err != nil { + c.reportError(err.Error()) + return + } + // TODO: We should check that the fields and discriminator + // specified in the union are actual fields in the struct. 
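+	// The extension shape consumed here is roughly (field values are
+	// illustrative; key names follow makeUnion in smd.go):
+	//
+	//	"x-kubernetes-unions": [{
+	//	    "discriminator": "type",
+	//	    "fields-to-discriminateBy": {"aws": "AWS", "gce": "GCE"},
+	//	}]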
+ a.Map.Unions = unions + + if preserveUnknownFields { + a.Map.ElementType = schema.TypeRef{ + NamedType: &deducedName, + } + } + + a.Map.ElementRelationship, err = getMapElementRelationship(k.GetExtensions()) + if err != nil { + c.reportError(err.Error()) + } +} + +func (c *convert) VisitArray(a *proto.Array) { + relationship, mapKeys, err := getListElementRelationship(a.GetExtensions()) + if err != nil { + c.reportError(err.Error()) + } + + atom := c.top() + atom.List = &schema.List{ + ElementType: c.makeRef(a.SubType, c.preserveUnknownFields), + ElementRelationship: relationship, + Keys: mapKeys, + } +} + +func (c *convert) VisitMap(m *proto.Map) { + relationship, err := getMapElementRelationship(m.GetExtensions()) + if err != nil { + c.reportError(err.Error()) + } + + a := c.top() + a.Map = &schema.Map{ + ElementType: c.makeRef(m.SubType, c.preserveUnknownFields), + ElementRelationship: relationship, + } +} + +func (c *convert) VisitPrimitive(p *proto.Primitive) { + a := c.top() + if c.currentName == quantityResource { + a.Scalar = ptr(schema.Scalar("untyped")) + } else { + *a = convertPrimitive(p.Type, p.Format) + } +} + +func (c *convert) VisitArbitrary(a *proto.Arbitrary) { + *c.top() = deducedDef.Atom +} + +func (c *convert) VisitReference(proto.Reference) { + // Do nothing, we handle references specially +} diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go index 35241fde6..799d866d5 100644 --- a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go @@ -17,43 +17,18 @@ limitations under the License. package schemaconv import ( - "errors" "fmt" - "path" "sort" - "strings" - "k8s.io/kube-openapi/pkg/util/proto" "sigs.k8s.io/structured-merge-diff/v4/schema" ) const ( - quantityResource = "io.k8s.apimachinery.pkg.api.resource.Quantity" + quantityResource = "io.k8s.apimachinery.pkg.api.resource.Quantity" + rawExtensionResource = "io.k8s.apimachinery.pkg.runtime.RawExtension" ) -// ToSchema converts openapi definitions into a schema suitable for structured -// merge (i.e. kubectl apply v2). -func ToSchema(models proto.Models) (*schema.Schema, error) { - return ToSchemaWithPreserveUnknownFields(models, false) -} - -// ToSchemaWithPreserveUnknownFields converts openapi definitions into a schema suitable for structured -// merge (i.e. kubectl apply v2), it will preserve unknown fields if specified. -func ToSchemaWithPreserveUnknownFields(models proto.Models, preserveUnknownFields bool) (*schema.Schema, error) { - c := convert{ - input: models, - preserveUnknownFields: preserveUnknownFields, - output: &schema.Schema{}, - } - if err := c.convertAll(); err != nil { - return nil, err - } - c.addCommonTypes() - return c.output, nil -} - type convert struct { - input proto.Models preserveUnknownFields bool output *schema.Schema @@ -64,7 +39,6 @@ type convert struct { func (c *convert) push(name string, a *schema.Atom) *convert { return &convert{ - input: c.input, preserveUnknownFields: c.preserveUnknownFields, output: c.output, currentName: name, @@ -78,30 +52,17 @@ func (c *convert) pop(c2 *convert) { c.errorMessages = append(c.errorMessages, c2.errorMessages...) 
} -func (c *convert) convertAll() error { - for _, name := range c.input.ListModels() { - model := c.input.LookupModel(name) - c.insertTypeDef(name, model) - } - if len(c.errorMessages) > 0 { - return errors.New(strings.Join(c.errorMessages, "\n")) - } - return nil -} - func (c *convert) reportError(format string, args ...interface{}) { c.errorMessages = append(c.errorMessages, c.currentName+": "+fmt.Sprintf(format, args...), ) } -func (c *convert) insertTypeDef(name string, model proto.Schema) { +func (c *convert) insertTypeDef(name string, atom schema.Atom) { def := schema.TypeDef{ Name: name, + Atom: atom, } - c2 := c.push(name, &def.Atom) - model.Accept(c2) - c.pop(c2) if def.Atom == (schema.Atom{}) { // This could happen if there were a top-level reference. return @@ -156,32 +117,6 @@ var deducedDef schema.TypeDef = schema.TypeDef{ }, } -func (c *convert) makeRef(model proto.Schema, preserveUnknownFields bool) schema.TypeRef { - var tr schema.TypeRef - if r, ok := model.(*proto.Ref); ok { - if r.Reference() == "io.k8s.apimachinery.pkg.runtime.RawExtension" { - return schema.TypeRef{ - NamedType: &untypedName, - } - } - // reference a named type - _, n := path.Split(r.Reference()) - tr.NamedType = &n - } else { - // compute the type inline - c2 := c.push("inlined in "+c.currentName, &tr.Inlined) - c2.preserveUnknownFields = preserveUnknownFields - model.Accept(c2) - c.pop(c2) - - if tr == (schema.TypeRef{}) { - // emit warning? - tr.NamedType = &untypedName - } - } - return tr -} - func makeUnions(extensions map[string]interface{}) ([]schema.Union, error) { schemaUnions := []schema.Union{} if iunions, ok := extensions["x-kubernetes-unions"]; ok { @@ -285,52 +220,6 @@ func makeUnion(extensions map[string]interface{}) (schema.Union, error) { return union, nil } -func (c *convert) VisitKind(k *proto.Kind) { - preserveUnknownFields := c.preserveUnknownFields - if p, ok := k.GetExtensions()["x-kubernetes-preserve-unknown-fields"]; ok && p == true { - preserveUnknownFields = true - } - - a := c.top() - a.Map = &schema.Map{} - for _, name := range k.FieldOrder { - member := k.Fields[name] - tr := c.makeRef(member, preserveUnknownFields) - a.Map.Fields = append(a.Map.Fields, schema.StructField{ - Name: name, - Type: tr, - Default: member.GetDefault(), - }) - } - - unions, err := makeUnions(k.GetExtensions()) - if err != nil { - c.reportError(err.Error()) - return - } - // TODO: We should check that the fields and discriminator - // specified in the union are actual fields in the struct. 
- a.Map.Unions = unions - - if preserveUnknownFields { - a.Map.ElementType = schema.TypeRef{ - NamedType: &deducedName, - } - } - - ext := k.GetExtensions() - if val, ok := ext["x-kubernetes-map-type"]; ok { - switch val { - case "atomic": - a.Map.ElementRelationship = schema.Atomic - case "granular": - a.Map.ElementRelationship = schema.Separable - default: - c.reportError("unknown map type %v", val) - } - } -} - func toStringSlice(o interface{}) (out []string, ok bool) { switch t := o.(type) { case []interface{}: @@ -341,117 +230,108 @@ func toStringSlice(o interface{}) (out []string, ok bool) { } } return out, true + case []string: + return t, true } return nil, false } -func (c *convert) VisitArray(a *proto.Array) { - atom := c.top() - atom.List = &schema.List{ - ElementRelationship: schema.Atomic, - } - l := atom.List - l.ElementType = c.makeRef(a.SubType, c.preserveUnknownFields) - - ext := a.GetExtensions() +func ptr(s schema.Scalar) *schema.Scalar { return &s } - if val, ok := ext["x-kubernetes-list-type"]; ok { - if val == "atomic" { - l.ElementRelationship = schema.Atomic - } else if val == "set" { - l.ElementRelationship = schema.Associative - } else if val == "map" { - l.ElementRelationship = schema.Associative - if keys, ok := ext["x-kubernetes-list-map-keys"]; ok { - if keyNames, ok := toStringSlice(keys); ok { - l.Keys = keyNames - } else { - c.reportError("uninterpreted map keys: %#v", keys) - } - } else { - c.reportError("missing map keys") - } - } else { - c.reportError("unknown list type %v", val) - l.ElementRelationship = schema.Atomic - } - } else if val, ok := ext["x-kubernetes-patch-strategy"]; ok { - if val == "merge" || val == "merge,retainKeys" { - l.ElementRelationship = schema.Associative - if key, ok := ext["x-kubernetes-patch-merge-key"]; ok { - if keyName, ok := key.(string); ok { - l.Keys = []string{keyName} - } else { - c.reportError("uninterpreted merge key: %#v", key) - } - } else { - // It's not an error for this to be absent, it - // means it's a set. - } - } else if val == "retainKeys" { - } else { - c.reportError("unknown patch strategy %v", val) - l.ElementRelationship = schema.Atomic +// Basic conversion functions to convert OpenAPI schema definitions to +// SMD Schema atoms +func convertPrimitive(typ string, format string) (a schema.Atom) { + switch typ { + case "integer": + a.Scalar = ptr(schema.Numeric) + case "number": + a.Scalar = ptr(schema.Numeric) + case "string": + switch format { + case "": + a.Scalar = ptr(schema.String) + case "byte": + // byte really means []byte and is encoded as a string. 
+ a.Scalar = ptr(schema.String) + case "int-or-string": + a.Scalar = ptr(schema.Scalar("untyped")) + case "date-time": + a.Scalar = ptr(schema.Scalar("untyped")) + default: + a.Scalar = ptr(schema.Scalar("untyped")) } + case "boolean": + a.Scalar = ptr(schema.Boolean) + default: + a.Scalar = ptr(schema.Scalar("untyped")) } -} -func (c *convert) VisitMap(m *proto.Map) { - a := c.top() - a.Map = &schema.Map{} - a.Map.ElementType = c.makeRef(m.SubType, c.preserveUnknownFields) + return a +} - ext := m.GetExtensions() - if val, ok := ext["x-kubernetes-map-type"]; ok { +func getListElementRelationship(ext map[string]any) (schema.ElementRelationship, []string, error) { + if val, ok := ext["x-kubernetes-list-type"]; ok { switch val { case "atomic": - a.Map.ElementRelationship = schema.Atomic - case "granular": - a.Map.ElementRelationship = schema.Separable + return schema.Atomic, nil, nil + case "set": + return schema.Associative, nil, nil + case "map": + keys, ok := ext["x-kubernetes-list-map-keys"] + + if !ok { + return schema.Associative, nil, fmt.Errorf("missing map keys") + } + + keyNames, ok := toStringSlice(keys) + if !ok { + return schema.Associative, nil, fmt.Errorf("uninterpreted map keys: %#v", keys) + } + + return schema.Associative, keyNames, nil default: - c.reportError("unknown map type %v", val) + return schema.Atomic, nil, fmt.Errorf("unknown list type %v", val) } - } -} + } else if val, ok := ext["x-kubernetes-patch-strategy"]; ok { + switch val { + case "merge", "merge,retainKeys": + if key, ok := ext["x-kubernetes-patch-merge-key"]; ok { + keyName, ok := key.(string) -func ptr(s schema.Scalar) *schema.Scalar { return &s } + if !ok { + return schema.Associative, nil, fmt.Errorf("uninterpreted merge key: %#v", key) + } -func (c *convert) VisitPrimitive(p *proto.Primitive) { - a := c.top() - if c.currentName == quantityResource { - a.Scalar = ptr(schema.Scalar("untyped")) - } else { - switch p.Type { - case proto.Integer: - a.Scalar = ptr(schema.Numeric) - case proto.Number: - a.Scalar = ptr(schema.Numeric) - case proto.String: - switch p.Format { - case "": - a.Scalar = ptr(schema.String) - case "byte": - // byte really means []byte and is encoded as a string. 
- a.Scalar = ptr(schema.String) - case "int-or-string": - a.Scalar = ptr(schema.Scalar("untyped")) - case "date-time": - a.Scalar = ptr(schema.Scalar("untyped")) - default: - a.Scalar = ptr(schema.Scalar("untyped")) + return schema.Associative, []string{keyName}, nil } - case proto.Boolean: - a.Scalar = ptr(schema.Boolean) + // It's not an error for x-kubernetes-patch-merge-key to be absent, + // it means it's a set + return schema.Associative, nil, nil + case "retainKeys": + return schema.Atomic, nil, nil default: - a.Scalar = ptr(schema.Scalar("untyped")) + return schema.Atomic, nil, fmt.Errorf("unknown patch strategy %v", val) } } -} -func (c *convert) VisitArbitrary(a *proto.Arbitrary) { - *c.top() = deducedDef.Atom + // Treat as atomic by default + return schema.Atomic, nil, nil } -func (c *convert) VisitReference(proto.Reference) { - // Do nothing, we handle references specially +// Returns map element relationship if specified, or empty string if unspecified +func getMapElementRelationship(ext map[string]any) (schema.ElementRelationship, error) { + val, ok := ext["x-kubernetes-map-type"] + if !ok { + // unset Map element relationship + return "", nil + } + + switch val { + case "atomic": + return schema.Atomic, nil + case "granular": + return schema.Separable, nil + default: + return "", fmt.Errorf("unknown map type %v", val) + } } diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go b/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go index 51dac4bdf..699291f1d 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go @@ -18,7 +18,10 @@ package spec3 import ( "encoding/json" + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -41,6 +44,9 @@ func (e *Encoding) MarshalJSON() ([]byte, error) { } func (e *Encoding) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, e) + } if err := json.Unmarshal(data, &e.EncodingProps); err != nil { return err } @@ -50,6 +56,20 @@ func (e *Encoding) UnmarshalJSON(data []byte) error { return nil } +func (e *Encoding) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + EncodingProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + + e.Extensions = internal.SanitizeExtensions(x.Extensions) + e.EncodingProps = x.EncodingProps + return nil +} + type EncodingProps struct { // Content Type for encoding a specific property ContentType string `json:"contentType,omitempty"` @@ -58,7 +78,7 @@ type EncodingProps struct { // Describes how a specific property value will be serialized depending on its type Style string `json:"style,omitempty"` // When this is true, property values of type array or object generate separate parameters for each value of the array, or key-value-pair of the map. 
For other types of properties this property has no effect - Explode string `json:"explode,omitempty"` + Explode bool `json:"explode,omitempty"` // AllowReserved determines whether the parameter value SHOULD allow reserved characters, as defined by RFC3986 AllowReserved bool `json:"allowReserved,omitempty"` } diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/example.go b/vendor/k8s.io/kube-openapi/pkg/spec3/example.go index 0f5ab983c..03b872717 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/example.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/example.go @@ -19,8 +19,11 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + + "k8s.io/kube-openapi/pkg/validation/spec" ) // Example https://swagger.io/specification/#example-object @@ -49,6 +52,9 @@ func (e *Example) MarshalJSON() ([]byte, error) { } func (e *Example) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, e) + } if err := json.Unmarshal(data, &e.Refable); err != nil { return err } @@ -61,6 +67,23 @@ func (e *Example) UnmarshalJSON(data []byte) error { return nil } +func (e *Example) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ExampleProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&e.Ref.Ref, x.Extensions); err != nil { + return err + } + e.Extensions = internal.SanitizeExtensions(x.Extensions) + e.ExampleProps = x.ExampleProps + + return nil +} + type ExampleProps struct { // Summary holds a short description of the example Summary string `json:"summary,omitempty"` diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go b/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go index 117113e7a..e79956721 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go @@ -18,8 +18,11 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" ) type ExternalDocumentation struct { @@ -48,6 +51,9 @@ func (e *ExternalDocumentation) MarshalJSON() ([]byte, error) { } func (e *ExternalDocumentation) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, e) + } if err := json.Unmarshal(data, &e.ExternalDocumentationProps); err != nil { return err } @@ -56,3 +62,16 @@ func (e *ExternalDocumentation) UnmarshalJSON(data []byte) error { } return nil } + +func (e *ExternalDocumentation) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ExternalDocumentationProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + e.Extensions = internal.SanitizeExtensions(x.Extensions) + e.ExternalDocumentationProps = x.ExternalDocumentationProps + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go new file mode 100644 index 000000000..bc19dd48e --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go @@ -0,0 +1,254 @@ +package spec3 + +import ( 
+ "math/rand" + "strings" + + fuzz "github.com/google/gofuzz" + + "k8s.io/kube-openapi/pkg/validation/spec" +) + +// refChance is the chance that a particular component will use a $ref +// instead of fuzzed. Expressed as a fraction 1/n, currently there is +// a 1/3 chance that a ref will be used. +const refChance = 3 + +const alphaNumChars = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + +func randAlphanumString() string { + arr := make([]string, rand.Intn(10)+5) + for i := 0; i < len(arr); i++ { + arr[i] = string(alphaNumChars[rand.Intn(len(alphaNumChars))]) + } + return strings.Join(arr, "") +} + +var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ + func(s *string, c fuzz.Continue) { + // All OpenAPI V3 map keys must follow the corresponding + // regex. Note that this restricts the range for all other + // string values as well. + str := randAlphanumString() + *s = str + }, + func(o *OpenAPI, c fuzz.Continue) { + c.FuzzNoCustom(o) + o.Version = "3.0.0" + }, + func(r *interface{}, c fuzz.Continue) { + switch c.Intn(3) { + case 0: + *r = nil + case 1: + n := c.RandString() + "x" + *r = n + case 2: + n := c.Float64() + *r = n + } + }, + func(v **spec.Info, c fuzz.Continue) { + // Info is never nil + *v = &spec.Info{} + c.FuzzNoCustom(*v) + (*v).Title = c.RandString() + "x" + }, + func(v *Paths, c fuzz.Continue) { + c.Fuzz(&v.VendorExtensible) + num := c.Intn(5) + if num > 0 { + v.Paths = make(map[string]*Path) + } + for i := 0; i < num; i++ { + val := Path{} + c.Fuzz(&val) + v.Paths["/"+c.RandString()] = &val + } + }, + func(v *SecurityScheme, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + switch c.Intn(4) { + case 0: + v.Type = "apiKey" + v.Name = c.RandString() + "x" + switch c.Intn(3) { + case 0: + v.In = "query" + case 1: + v.In = "header" + case 2: + v.In = "cookie" + } + case 1: + v.Type = "http" + case 2: + v.Type = "oauth2" + v.Flows = make(map[string]*OAuthFlow) + flow := OAuthFlow{} + flow.AuthorizationUrl = c.RandString() + "x" + v.Flows["implicit"] = &flow + flow.Scopes = make(map[string]string) + flow.Scopes["foo"] = "bar" + case 3: + v.Type = "openIdConnect" + v.OpenIdConnectUrl = "https://" + c.RandString() + } + v.Scheme = "basic" + }, + func(v *spec.Ref, c fuzz.Continue) { + switch c.Intn(7) { + case 0: + *v = spec.MustCreateRef("#/components/schemas/" + randAlphanumString()) + case 1: + *v = spec.MustCreateRef("#/components/responses/" + randAlphanumString()) + case 2: + *v = spec.MustCreateRef("#/components/headers/" + randAlphanumString()) + case 3: + *v = spec.MustCreateRef("#/components/securitySchemes/" + randAlphanumString()) + case 5: + *v = spec.MustCreateRef("#/components/parameters/" + randAlphanumString()) + case 6: + *v = spec.MustCreateRef("#/components/requestBodies/" + randAlphanumString()) + } + }, + func(v *Parameter, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + c.Fuzz(&v.ParameterProps) + c.Fuzz(&v.VendorExtensible) + + switch c.Intn(3) { + case 0: + // Header param + v.In = "query" + case 1: + v.In = "header" + case 2: + v.In = "cookie" + } + }, + func(v *RequestBody, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + c.Fuzz(&v.RequestBodyProps) + c.Fuzz(&v.VendorExtensible) + }, + func(v *Header, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + c.Fuzz(&v.HeaderProps) + c.Fuzz(&v.VendorExtensible) + }, + func(v *ResponsesProps, c fuzz.Continue) { + c.Fuzz(&v.Default) + n := c.Intn(5) + 
for i := 0; i < n; i++ { + r2 := Response{} + c.Fuzz(&r2) + // HTTP Status code in 100-599 Range + code := c.Intn(500) + 100 + v.StatusCodeResponses = make(map[int]*Response) + v.StatusCodeResponses[code] = &r2 + } + }, + func(v *Response, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Refable) + return + } + c.Fuzz(&v.ResponseProps) + c.Fuzz(&v.VendorExtensible) + }, + func(v *spec.Extensions, c fuzz.Continue) { + numChildren := c.Intn(5) + for i := 0; i < numChildren; i++ { + if *v == nil { + *v = spec.Extensions{} + } + (*v)["x-"+c.RandString()] = c.RandString() + } + }, + func(v *spec.ExternalDocumentation, c fuzz.Continue) { + c.Fuzz(&v.Description) + v.URL = "https://" + randAlphanumString() + }, + func(v *spec.SchemaURL, c fuzz.Continue) { + *v = spec.SchemaURL("https://" + randAlphanumString()) + }, + func(v *spec.SchemaOrBool, c fuzz.Continue) { + *v = spec.SchemaOrBool{} + + if c.RandBool() { + v.Allows = c.RandBool() + } else { + v.Schema = &spec.Schema{} + v.Allows = true + c.Fuzz(&v.Schema) + } + }, + func(v *spec.SchemaOrArray, c fuzz.Continue) { + *v = spec.SchemaOrArray{} + if c.RandBool() { + schema := spec.Schema{} + c.Fuzz(&schema) + v.Schema = &schema + } else { + v.Schemas = []spec.Schema{} + numChildren := c.Intn(5) + for i := 0; i < numChildren; i++ { + schema := spec.Schema{} + c.Fuzz(&schema) + v.Schemas = append(v.Schemas, schema) + } + + } + + }, + func(v *spec.SchemaOrStringArray, c fuzz.Continue) { + if c.RandBool() { + *v = spec.SchemaOrStringArray{} + if c.RandBool() { + c.Fuzz(&v.Property) + } else { + c.Fuzz(&v.Schema) + } + } + }, + func(v *spec.Schema, c fuzz.Continue) { + if c.Intn(refChance) == 0 { + c.Fuzz(&v.Ref) + return + } + if c.RandBool() { + // file schema + c.Fuzz(&v.Default) + c.Fuzz(&v.Description) + c.Fuzz(&v.Example) + c.Fuzz(&v.ExternalDocs) + + c.Fuzz(&v.Format) + c.Fuzz(&v.ReadOnly) + c.Fuzz(&v.Required) + c.Fuzz(&v.Title) + v.Type = spec.StringOrArray{"file"} + + } else { + // normal schema + c.Fuzz(&v.SchemaProps) + c.Fuzz(&v.SwaggerSchemaProps) + c.Fuzz(&v.VendorExtensible) + c.Fuzz(&v.ExtraProps) + } + + }, +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/header.go b/vendor/k8s.io/kube-openapi/pkg/spec3/header.go index cead4b15d..ee5a30f79 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/header.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/header.go @@ -20,6 +20,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -50,6 +52,9 @@ func (h *Header) MarshalJSON() ([]byte, error) { } func (h *Header) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, h) + } if err := json.Unmarshal(data, &h.Refable); err != nil { return err } @@ -63,6 +68,22 @@ func (h *Header) UnmarshalJSON(data []byte) error { return nil } +func (h *Header) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + HeaderProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&h.Ref.Ref, x.Extensions); err != nil { + return err + } + h.Extensions = internal.SanitizeExtensions(x.Extensions) + h.HeaderProps = x.HeaderProps + return nil +} + // HeaderProps a struct that describes a header object type HeaderProps struct { // Description holds a brief description of the parameter diff --git 
a/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go b/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go index 828fd8dc5..d390e69bc 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go @@ -18,7 +18,10 @@ package spec3 import ( "encoding/json" + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -44,6 +47,9 @@ func (m *MediaType) MarshalJSON() ([]byte, error) { } func (m *MediaType) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, m) + } if err := json.Unmarshal(data, &m.MediaTypeProps); err != nil { return err } @@ -53,10 +59,24 @@ func (m *MediaType) UnmarshalJSON(data []byte) error { return nil } +func (m *MediaType) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + MediaTypeProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + m.Extensions = internal.SanitizeExtensions(x.Extensions) + m.MediaTypeProps = x.MediaTypeProps + + return nil +} + // MediaTypeProps a struct that allows you to specify content format, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#mediaTypeObject type MediaTypeProps struct { // Schema holds the schema defining the type used for the media type - Schema *spec.Schema `json:"schema,omitempty"` + Schema *spec.Schema `json:"schema,omitempty"` // Example of the media type Example interface{} `json:"example,omitempty"` // Examples of the media type. Each example object should match the media type and specific schema if present diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go b/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go index de8aa4602..28230610b 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go @@ -19,8 +19,10 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" ) // Operation describes a single API operation on a path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#operationObject @@ -46,12 +48,28 @@ func (o *Operation) MarshalJSON() ([]byte, error) { // UnmarshalJSON hydrates this items instance with the data from JSON func (o *Operation) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, o) + } if err := json.Unmarshal(data, &o.OperationProps); err != nil { return err } return json.Unmarshal(data, &o.VendorExtensible) } +func (o *Operation) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + OperationProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + o.Extensions = internal.SanitizeExtensions(x.Extensions) + o.OperationProps = x.OperationProps + return nil +} + // OperationProps describes a single API operation on a path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#operationObject type OperationProps struct { // Tags holds a list of tags for API documentation control @@ -73,7 +91,7 @@ type OperationProps struct { // Deprecated declares 
this operation to be deprecated Deprecated bool `json:"deprecated,omitempty"` // SecurityRequirement holds a declaration of which security mechanisms can be used for this operation - SecurityRequirement []*SecurityRequirement `json:"security,omitempty"` + SecurityRequirement []map[string][]string `json:"security,omitempty"` // Servers contains an alternative server array to service this operation Servers []*Server `json:"servers,omitempty"` } diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go b/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go index 0d7180e50..613da71a6 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/parameter.go @@ -20,6 +20,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -50,6 +52,10 @@ func (p *Parameter) MarshalJSON() ([]byte, error) { } func (p *Parameter) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, p) + } + if err := json.Unmarshal(data, &p.Refable); err != nil { return err } @@ -63,6 +69,22 @@ func (p *Parameter) UnmarshalJSON(data []byte) error { return nil } +func (p *Parameter) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ParameterProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&p.Ref.Ref, x.Extensions); err != nil { + return err + } + p.Extensions = internal.SanitizeExtensions(x.Extensions) + p.ParameterProps = x.ParameterProps + return nil +} + // ParameterProps a struct that describes a single operation parameter, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#parameterObject type ParameterProps struct { // Name holds the name of the parameter diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/path.go b/vendor/k8s.io/kube-openapi/pkg/spec3/path.go index bc48c504d..40d9061ac 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/path.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/path.go @@ -18,10 +18,13 @@ package spec3 import ( "encoding/json" + "fmt" "strings" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" ) // Paths describes the available paths and operations for the API, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathsObject @@ -45,6 +48,9 @@ func (p *Paths) MarshalJSON() ([]byte, error) { // UnmarshalJSON hydrates this items instance with the data from JSON func (p *Paths) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, p) + } var res map[string]json.RawMessage if err := json.Unmarshal(data, &res); err != nil { return err @@ -74,6 +80,59 @@ func (p *Paths) UnmarshalJSON(data []byte) error { return nil } +func (p *Paths) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + tok, err := dec.ReadToken() + if err != nil { + return err + } + switch k := tok.Kind(); k { + case 'n': + *p = Paths{} + return nil + case '{': + for { + tok, err := dec.ReadToken() + if err != nil { + return err + } + + if tok.Kind() == '}' { + return nil + } + + switch k := tok.String(); { + case 
internal.IsExtensionKey(k): + var ext any + if err := opts.UnmarshalNext(dec, &ext); err != nil { + return err + } + + if p.Extensions == nil { + p.Extensions = make(map[string]any) + } + p.Extensions[k] = ext + case len(k) > 0 && k[0] == '/': + pi := Path{} + if err := opts.UnmarshalNext(dec, &pi); err != nil { + return err + } + + if p.Paths == nil { + p.Paths = make(map[string]*Path) + } + p.Paths[k] = &pi + default: + _, err := dec.ReadValue() // skip value + if err != nil { + return err + } + } + } + default: + return fmt.Errorf("unknown JSON kind: %v", k) + } +} + // Path describes the operations available on a single path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathItemObject // // Note that this struct is actually a thin wrapper around PathProps to make it referable and extensible @@ -101,6 +160,9 @@ func (p *Path) MarshalJSON() ([]byte, error) { } func (p *Path) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, p) + } if err := json.Unmarshal(data, &p.Refable); err != nil { return err } @@ -113,6 +175,24 @@ func (p *Path) UnmarshalJSON(data []byte) error { return nil } +func (p *Path) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + PathProps + } + + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&p.Ref.Ref, x.Extensions); err != nil { + return err + } + p.Extensions = internal.SanitizeExtensions(x.Extensions) + p.PathProps = x.PathProps + + return nil +} + // PathProps describes the operations available on a single path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathItemObject type PathProps struct { // Summary holds a summary for all operations in this path diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go b/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go index 0adc62826..33267ce67 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go @@ -19,8 +19,10 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" ) // RequestBody describes a single request body, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#requestBodyObject @@ -50,6 +52,9 @@ func (r *RequestBody) MarshalJSON() ([]byte, error) { } func (r *RequestBody) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, r) + } if err := json.Unmarshal(data, &r.Refable); err != nil { return err } @@ -71,3 +76,19 @@ type RequestBodyProps struct { // Required determines if the request body is required in the request Required bool `json:"required,omitempty"` } + +func (r *RequestBody) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + RequestBodyProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&r.Ref.Ref, x.Extensions); err != nil { + return err + } + r.Extensions = internal.SanitizeExtensions(x.Extensions) + r.RequestBodyProps = x.RequestBodyProps + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/response.go 
b/vendor/k8s.io/kube-openapi/pkg/spec3/response.go index ccd73369f..95b388e6c 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/response.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/response.go @@ -18,10 +18,13 @@ package spec3 import ( "encoding/json" + "fmt" "strconv" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" ) // Responses holds the list of possible responses as they are returned from executing this operation @@ -46,13 +49,15 @@ func (r *Responses) MarshalJSON() ([]byte, error) { } func (r *Responses) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, r) + } if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { return err } if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { return err } - return nil } @@ -78,25 +83,91 @@ func (r ResponsesProps) MarshalJSON() ([]byte, error) { // UnmarshalJSON unmarshals responses from JSON func (r *ResponsesProps) UnmarshalJSON(data []byte) error { - var res map[string]*Response + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, r) + } + var res map[string]json.RawMessage if err := json.Unmarshal(data, &res); err != nil { - return nil + return err } if v, ok := res["default"]; ok { - r.Default = v + value := Response{} + if err := json.Unmarshal(v, &value); err != nil { + return err + } + r.Default = &value delete(res, "default") } for k, v := range res { + // Take all integral keys if nk, err := strconv.Atoi(k); err == nil { if r.StatusCodeResponses == nil { r.StatusCodeResponses = map[int]*Response{} } - r.StatusCodeResponses[nk] = v + value := Response{} + if err := json.Unmarshal(v, &value); err != nil { + return err + } + r.StatusCodeResponses[nk] = &value } } return nil } +func (r *Responses) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) (err error) { + tok, err := dec.ReadToken() + if err != nil { + return err + } + switch k := tok.Kind(); k { + case 'n': + *r = Responses{} + return nil + case '{': + for { + tok, err := dec.ReadToken() + if err != nil { + return err + } + if tok.Kind() == '}' { + return nil + } + switch k := tok.String(); { + case internal.IsExtensionKey(k): + var ext any + if err := opts.UnmarshalNext(dec, &ext); err != nil { + return err + } + + if r.Extensions == nil { + r.Extensions = make(map[string]any) + } + r.Extensions[k] = ext + case k == "default": + resp := Response{} + if err := opts.UnmarshalNext(dec, &resp); err != nil { + return err + } + r.ResponsesProps.Default = &resp + default: + if nk, err := strconv.Atoi(k); err == nil { + resp := Response{} + if err := opts.UnmarshalNext(dec, &resp); err != nil { + return err + } + + if r.StatusCodeResponses == nil { + r.StatusCodeResponses = map[int]*Response{} + } + r.StatusCodeResponses[nk] = &resp + } + } + } + default: + return fmt.Errorf("unknown JSON kind: %v", k) + } +} + // Response describes a single response from an API Operation, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#responseObject // // Note that this struct is actually a thin wrapper around ResponseProps to make it referable and extensible @@ -124,6 +195,9 @@ func (r *Response) MarshalJSON() ([]byte, error) { } func (r *Response) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return 
jsonv2.Unmarshal(data, r) + } if err := json.Unmarshal(data, &r.Refable); err != nil { return err } @@ -133,7 +207,22 @@ func (r *Response) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { return err } + return nil +} +func (r *Response) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ResponseProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&r.Ref.Ref, x.Extensions); err != nil { + return err + } + r.Extensions = internal.SanitizeExtensions(x.Extensions) + r.ResponseProps = x.ResponseProps return nil } @@ -149,7 +238,6 @@ type ResponseProps struct { Links map[string]*Link `json:"links,omitempty"` } - // Link represents a possible design-time link for a response, more at https://swagger.io/specification/#link-object type Link struct { spec.Refable @@ -175,6 +263,9 @@ func (r *Link) MarshalJSON() ([]byte, error) { } func (r *Link) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, r) + } if err := json.Unmarshal(data, &r.Refable); err != nil { return err } @@ -188,6 +279,22 @@ func (r *Link) UnmarshalJSON(data []byte) error { return nil } +func (l *Link) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + LinkProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := internal.JSONRefFromMap(&l.Ref.Ref, x.Extensions); err != nil { + return err + } + l.Extensions = internal.SanitizeExtensions(x.Extensions) + l.LinkProps = x.LinkProps + return nil +} + // LinkProps describes a single response from an API Operation, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#responseObject type LinkProps struct { // OperationId is the name of an existing, resolvable OAS operation diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go b/vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go deleted file mode 100644 index 0ce8924ef..000000000 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package spec3 - -import ( - "encoding/json" - - "k8s.io/kube-openapi/pkg/validation/spec" - "github.com/go-openapi/swag" -) - -// SecurityRequirementProps describes the required security schemes to execute an operation, more at https://swagger.io/specification/#security-requirement-object -// -// Note that this struct is actually a thin wrapper around SecurityRequirementProps to make it referable and extensible -type SecurityRequirement struct { - SecurityRequirementProps - spec.VendorExtensible -} - -// MarshalJSON is a custom marshal function that knows how to encode SecurityRequirement as JSON -func (s *SecurityRequirement) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SecurityRequirementProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (s *SecurityRequirement) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &s.SecurityRequirementProps); err != nil { - return err - } - return json.Unmarshal(data, &s.VendorExtensible) -} - -// SecurityRequirementProps describes the required security schemes to execute an operation, more at https://swagger.io/specification/#security-requirement-object -type SecurityRequirementProps map[string][]string diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go b/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go index 9b1352f4e..edf7e6de3 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go @@ -19,8 +19,8 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/validation/spec" ) // SecurityScheme defines reusable Security Scheme Object, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#securitySchemeObject diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/server.go b/vendor/k8s.io/kube-openapi/pkg/spec3/server.go index a505fb221..d5df0a781 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/server.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/server.go @@ -18,9 +18,11 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" - "github.com/go-openapi/swag" + "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" + "k8s.io/kube-openapi/pkg/validation/spec" ) type Server struct { @@ -51,6 +53,10 @@ func (s *Server) MarshalJSON() ([]byte, error) { } func (s *Server) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, s) + } + if err := json.Unmarshal(data, &s.ServerProps); err != nil { return err } @@ -60,6 +66,20 @@ func (s *Server) UnmarshalJSON(data []byte) error { return nil } +func (s *Server) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ServerProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + s.Extensions = internal.SanitizeExtensions(x.Extensions) + s.ServerProps = x.ServerProps + + return nil +} + type ServerVariable struct { ServerVariableProps spec.VendorExtensible @@ -88,6 +108,9 @@ func (s *ServerVariable) MarshalJSON() ([]byte, error) { } func (s *ServerVariable) UnmarshalJSON(data []byte) error { + if 
internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, s) + } if err := json.Unmarshal(data, &s.ServerVariableProps); err != nil { return err } @@ -96,3 +119,17 @@ func (s *ServerVariable) UnmarshalJSON(data []byte) error { } return nil } + +func (s *ServerVariable) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + spec.Extensions + ServerVariableProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + s.Extensions = internal.SanitizeExtensions(x.Extensions) + s.ServerVariableProps = x.ServerVariableProps + + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go b/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go index 3ff48a3c3..bed096fb7 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/spec.go @@ -17,6 +17,10 @@ limitations under the License. package spec3 import ( + "encoding/json" + + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -35,3 +39,12 @@ type OpenAPI struct { // ExternalDocs holds additional external documentation ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` } + +func (o *OpenAPI) UnmarshalJSON(data []byte) error { + type OpenAPIWithNoFunctions OpenAPI + p := (*OpenAPIWithNoFunctions)(o) + if internal.UseOptimizedJSONUnmarshalingV3 { + return jsonv2.Unmarshal(data, &p) + } + return json.Unmarshal(data, &p) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go index a3f476d5d..519dcf2eb 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go @@ -120,7 +120,7 @@ func (d *Definitions) ParseSchemaV3(s *openapi_v3.Schema, path *Path) (Schema, e switch s.GetType() { case object: for _, extension := range s.GetSpecificationExtension() { - if extension.Name == "x-kuberentes-group-version-kind" { + if extension.Name == "x-kubernetes-group-version-kind" { // Objects with x-kubernetes-group-version-kind are always top // level types. return d.parseV3Kind(s, path) @@ -285,7 +285,7 @@ func parseV3Interface(def *yaml.Node) (interface{}, error) { func (d *Definitions) parseV3BaseSchema(s *openapi_v3.Schema, path *Path) (*BaseSchema, error) { if s == nil { - return nil, fmt.Errorf("cannot initializae BaseSchema from nil") + return nil, fmt.Errorf("cannot initialize BaseSchema from nil") } def, err := parseV3Interface(s.GetDefault().ToRawInfo()) diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go new file mode 100644 index 000000000..c66f998f5 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/fuzz.go @@ -0,0 +1,502 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spec + +import ( + "github.com/go-openapi/jsonreference" + "github.com/google/go-cmp/cmp" + fuzz "github.com/google/gofuzz" +) + +var SwaggerFuzzFuncs []interface{} = []interface{}{ + func(v *Responses, c fuzz.Continue) { + c.FuzzNoCustom(v) + if v.Default != nil { + // Check if we hit maxDepth and left an incomplete value + if v.Default.Description == "" { + v.Default = nil + v.StatusCodeResponses = nil + } + } + + // conversion has no way to discern empty statusCodeResponses from + // nil, since "default" is always included in the map. + // So avoid empty responses list + if len(v.StatusCodeResponses) == 0 { + v.StatusCodeResponses = nil + } + }, + func(v *Operation, c fuzz.Continue) { + c.FuzzNoCustom(v) + + if v != nil { + // force non-nil + v.Responses = &Responses{} + c.Fuzz(v.Responses) + + v.Schemes = nil + if c.RandBool() { + v.Schemes = append(v.Schemes, "http") + } + + if c.RandBool() { + v.Schemes = append(v.Schemes, "https") + } + + if c.RandBool() { + v.Schemes = append(v.Schemes, "ws") + } + + if c.RandBool() { + v.Schemes = append(v.Schemes, "wss") + } + + // Gnostic unconditionally makes security values non-null + // So do not fuzz null values into the array. + for i, val := range v.Security { + if val == nil { + v.Security[i] = make(map[string][]string) + } + + for k, v := range val { + if v == nil { + val[k] = make([]string, 0) + } + } + } + } + }, + func(v map[int]Response, c fuzz.Continue) { + n := 0 + c.Fuzz(&n) + if n == 0 { + // Test that fuzzer is not at maxDepth so we do not + // end up with empty elements + return + } + + // Prevent negative numbers + num := c.Intn(4) + for i := 0; i < num+2; i++ { + val := Response{} + c.Fuzz(&val) + + val.Description = c.RandString() + "x" + v[100*(i+1)+c.Intn(100)] = val + } + }, + func(v map[string]PathItem, c fuzz.Continue) { + n := 0 + c.Fuzz(&n) + if n == 0 { + // Test that fuzzer is not at maxDepth so we do not + // end up with empty elements + return + } + + num := c.Intn(5) + for i := 0; i < num+2; i++ { + val := PathItem{} + c.Fuzz(&val) + + // Ref params are only allowed in certain locations, so + // possibly add a few to PathItems + numRefsToAdd := c.Intn(5) + for i := 0; i < numRefsToAdd; i++ { + theRef := Parameter{} + c.Fuzz(&theRef.Refable) + + val.Parameters = append(val.Parameters, theRef) + } + + v["/"+c.RandString()] = val + } + }, + func(v *SchemaOrArray, c fuzz.Continue) { + *v = SchemaOrArray{} + // gnostic parser just doesn't support more + // than one Schema here + v.Schema = &Schema{} + c.Fuzz(&v.Schema) + + }, + func(v *SchemaOrBool, c fuzz.Continue) { + *v = SchemaOrBool{} + + if c.RandBool() { + v.Allows = c.RandBool() + } else { + v.Schema = &Schema{} + v.Allows = true + c.Fuzz(&v.Schema) + } + }, + func(v map[string]Response, c fuzz.Continue) { + n := 0 + c.Fuzz(&n) + if n == 0 { + // Test that fuzzer is not at maxDepth so we do not + // end up with empty elements + return + } + + // Response definitions are not allowed to + // be refs + for i := 0; i < c.Intn(5)+1; i++ { + resp := &Response{} + + c.Fuzz(resp) + resp.Ref = Ref{} + resp.Description = c.RandString() + "x" + + // Response refs are not vendor extensible by gnostic + resp.VendorExtensible.Extensions = nil + v[c.RandString()+"x"] = *resp + } + }, + func(v *Header, c fuzz.Continue) { + if v != nil { + c.FuzzNoCustom(v) + + // descendant Items of Header may not be refs + cur := v.Items + for cur != nil { + cur.Ref = Ref{} + cur = cur.Items + } + } + }, + func(v *Ref, c fuzz.Continue) { + *v = Ref{} + v.Ref, _ = 
jsonreference.New("http://asd.com/" + c.RandString()) + }, + func(v *Response, c fuzz.Continue) { + *v = Response{} + if c.RandBool() { + v.Ref = Ref{} + v.Ref.Ref, _ = jsonreference.New("http://asd.com/" + c.RandString()) + } else { + c.Fuzz(&v.VendorExtensible) + c.Fuzz(&v.Schema) + c.Fuzz(&v.ResponseProps) + + v.Headers = nil + v.Ref = Ref{} + + n := 0 + c.Fuzz(&n) + if n != 0 { + // Test that fuzzer is not at maxDepth so we do not + // end up with empty elements + num := c.Intn(4) + for i := 0; i < num; i++ { + if v.Headers == nil { + v.Headers = make(map[string]Header) + } + hdr := Header{} + c.Fuzz(&hdr) + if hdr.Type == "" { + // hit maxDepth, just abort trying to make haders + v.Headers = nil + break + } + v.Headers[c.RandString()+"x"] = hdr + } + } else { + v.Headers = nil + } + } + + v.Description = c.RandString() + "x" + + // Gnostic parses empty as nil, so to keep avoid putting empty + if len(v.Headers) == 0 { + v.Headers = nil + } + }, + func(v **Info, c fuzz.Continue) { + // Info is never nil + *v = &Info{} + c.FuzzNoCustom(*v) + + (*v).Title = c.RandString() + "x" + }, + func(v *Extensions, c fuzz.Continue) { + // gnostic parser only picks up x- vendor extensions + numChildren := c.Intn(5) + for i := 0; i < numChildren; i++ { + if *v == nil { + *v = Extensions{} + } + (*v)["x-"+c.RandString()] = c.RandString() + } + }, + func(v *Swagger, c fuzz.Continue) { + c.FuzzNoCustom(v) + + if v.Paths == nil { + // Force paths non-nil since it does not have omitempty in json tag. + // This means a perfect roundtrip (via json) is impossible, + // since we can't tell the difference between empty/unspecified paths + v.Paths = &Paths{} + c.Fuzz(v.Paths) + } + + v.Swagger = "2.0" + + // Gnostic support serializing ID at all + // unavoidable data loss + v.ID = "" + + v.Schemes = nil + if c.RandUint64()%2 == 1 { + v.Schemes = append(v.Schemes, "http") + } + + if c.RandUint64()%2 == 1 { + v.Schemes = append(v.Schemes, "https") + } + + if c.RandUint64()%2 == 1 { + v.Schemes = append(v.Schemes, "ws") + } + + if c.RandUint64()%2 == 1 { + v.Schemes = append(v.Schemes, "wss") + } + + // Gnostic unconditionally makes security values non-null + // So do not fuzz null values into the array. 
+ for i, val := range v.Security { + if val == nil { + v.Security[i] = make(map[string][]string) + } + + for k, v := range val { + if v == nil { + val[k] = make([]string, 0) + } + } + } + }, + func(v *SecurityScheme, c fuzz.Continue) { + v.Description = c.RandString() + "x" + c.Fuzz(&v.VendorExtensible) + + switch c.Intn(3) { + case 0: + v.Type = "basic" + case 1: + v.Type = "apiKey" + switch c.Intn(2) { + case 0: + v.In = "header" + case 1: + v.In = "query" + default: + panic("unreachable") + } + v.Name = "x" + c.RandString() + case 2: + v.Type = "oauth2" + + switch c.Intn(4) { + case 0: + v.Flow = "accessCode" + v.TokenURL = "https://" + c.RandString() + v.AuthorizationURL = "https://" + c.RandString() + case 1: + v.Flow = "application" + v.TokenURL = "https://" + c.RandString() + case 2: + v.Flow = "implicit" + v.AuthorizationURL = "https://" + c.RandString() + case 3: + v.Flow = "password" + v.TokenURL = "https://" + c.RandString() + default: + panic("unreachable") + } + c.Fuzz(&v.Scopes) + default: + panic("unreachable") + } + }, + func(v *interface{}, c fuzz.Continue) { + *v = c.RandString() + "x" + }, + func(v *string, c fuzz.Continue) { + *v = c.RandString() + "x" + }, + func(v *ExternalDocumentation, c fuzz.Continue) { + v.Description = c.RandString() + "x" + v.URL = c.RandString() + "x" + }, + func(v *SimpleSchema, c fuzz.Continue) { + c.FuzzNoCustom(v) + + switch c.Intn(5) { + case 0: + v.Type = "string" + case 1: + v.Type = "number" + case 2: + v.Type = "boolean" + case 3: + v.Type = "integer" + case 4: + v.Type = "array" + default: + panic("unreachable") + } + + switch c.Intn(5) { + case 0: + v.CollectionFormat = "csv" + case 1: + v.CollectionFormat = "ssv" + case 2: + v.CollectionFormat = "tsv" + case 3: + v.CollectionFormat = "pipes" + case 4: + v.CollectionFormat = "" + default: + panic("unreachable") + } + + // None of the types which include SimpleSchema in our definitions + // actually support "example" in the official spec + v.Example = nil + + // unsupported by openapi + v.Nullable = false + }, + func(v *int64, c fuzz.Continue) { + c.Fuzz(v) + + // Gnostic does not differentiate between 0 and non-specified + // so avoid using 0 for fuzzer + if *v == 0 { + *v = 1 + } + }, + func(v *float64, c fuzz.Continue) { + c.Fuzz(v) + + // Gnostic does not differentiate between 0 and non-specified + // so avoid using 0 for fuzzer + if *v == 0.0 { + *v = 1.0 + } + }, + func(v *Parameter, c fuzz.Continue) { + if v == nil { + return + } + c.Fuzz(&v.VendorExtensible) + if c.RandBool() { + // body param + v.Description = c.RandString() + "x" + v.Name = c.RandString() + "x" + v.In = "body" + c.Fuzz(&v.Description) + c.Fuzz(&v.Required) + + v.Schema = &Schema{} + c.Fuzz(&v.Schema) + + } else { + c.Fuzz(&v.SimpleSchema) + c.Fuzz(&v.CommonValidations) + v.AllowEmptyValue = false + v.Description = c.RandString() + "x" + v.Name = c.RandString() + "x" + + switch c.Intn(4) { + case 0: + // Header param + v.In = "header" + case 1: + // Form data param + v.In = "formData" + v.AllowEmptyValue = c.RandBool() + case 2: + // Query param + v.In = "query" + v.AllowEmptyValue = c.RandBool() + case 3: + // Path param + v.In = "path" + v.Required = true + default: + panic("unreachable") + } + + // descendant Items of Parameter may not be refs + cur := v.Items + for cur != nil { + cur.Ref = Ref{} + cur = cur.Items + } + } + }, + func(v *Schema, c fuzz.Continue) { + if c.RandBool() { + // file schema + c.Fuzz(&v.Default) + c.Fuzz(&v.Description) + c.Fuzz(&v.Example) + c.Fuzz(&v.ExternalDocs) + + 
c.Fuzz(&v.Format) + c.Fuzz(&v.ReadOnly) + c.Fuzz(&v.Required) + c.Fuzz(&v.Title) + v.Type = StringOrArray{"file"} + + } else { + // normal schema + c.Fuzz(&v.SchemaProps) + c.Fuzz(&v.SwaggerSchemaProps) + c.Fuzz(&v.VendorExtensible) + // c.Fuzz(&v.ExtraProps) + // ExtraProps will not roundtrip - gnostic throws out + // unrecognized keys + } + + // Not supported by official openapi v2 spec + // and stripped by k8s apiserver + v.ID = "" + v.AnyOf = nil + v.OneOf = nil + v.Not = nil + v.Nullable = false + v.AdditionalItems = nil + v.Schema = "" + v.PatternProperties = nil + v.Definitions = nil + v.Dependencies = nil + }, +} + +var SwaggerDiffOptions = []cmp.Option{ + // cmp.Diff panics on Ref since jsonreference.Ref uses unexported fields + cmp.Comparer(func(a Ref, b Ref) bool { + return a.String() == b.String() + }), +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go new file mode 100644 index 000000000..406a09d9d --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/gnostic.go @@ -0,0 +1,1517 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spec + +import ( + "errors" + "strconv" + + "github.com/go-openapi/jsonreference" + openapi_v2 "github.com/google/gnostic/openapiv2" +) + +// Interfaces +type GnosticCommonValidations interface { + GetMaximum() float64 + GetExclusiveMaximum() bool + GetMinimum() float64 + GetExclusiveMinimum() bool + GetMaxLength() int64 + GetMinLength() int64 + GetPattern() string + GetMaxItems() int64 + GetMinItems() int64 + GetUniqueItems() bool + GetMultipleOf() float64 + GetEnum() []*openapi_v2.Any +} + +func (k *CommonValidations) FromGnostic(g GnosticCommonValidations) error { + if g == nil { + return nil + } + + max := g.GetMaximum() + if max != 0 { + k.Maximum = &max + } + + k.ExclusiveMaximum = g.GetExclusiveMaximum() + + min := g.GetMinimum() + if min != 0 { + k.Minimum = &min + } + + k.ExclusiveMinimum = g.GetExclusiveMinimum() + + maxLen := g.GetMaxLength() + if maxLen != 0 { + k.MaxLength = &maxLen + } + + minLen := g.GetMinLength() + if minLen != 0 { + k.MinLength = &minLen + } + + k.Pattern = g.GetPattern() + + maxItems := g.GetMaxItems() + if maxItems != 0 { + k.MaxItems = &maxItems + } + + minItems := g.GetMinItems() + if minItems != 0 { + k.MinItems = &minItems + } + + k.UniqueItems = g.GetUniqueItems() + + multOf := g.GetMultipleOf() + if multOf != 0 { + k.MultipleOf = &multOf + } + + enums := g.GetEnum() + + if enums != nil { + k.Enum = make([]interface{}, len(enums)) + for i, v := range enums { + if v == nil { + continue + } + + var convert interface{} + if err := v.ToRawInfo().Decode(&convert); err != nil { + return err + } else { + k.Enum[i] = convert + } + } + } + + return nil +} + +type GnosticSimpleSchema interface { + GetType() string + GetFormat() string + GetItems() *openapi_v2.PrimitivesItems + GetCollectionFormat() string + GetDefault() *openapi_v2.Any +} + +func (k *SimpleSchema) FromGnostic(g 
GnosticSimpleSchema) error { + if g == nil { + return nil + } + + k.Type = g.GetType() + k.Format = g.GetFormat() + k.CollectionFormat = g.GetCollectionFormat() + + items := g.GetItems() + if items != nil { + k.Items = &Items{} + if err := k.Items.FromGnostic(items); err != nil { + return err + } + } + + def := g.GetDefault() + if def != nil { + var convert interface{} + if err := def.ToRawInfo().Decode(&convert); err != nil { + return err + } else { + k.Default = convert + } + } + + return nil +} + +func (k *Items) FromGnostic(g *openapi_v2.PrimitivesItems) error { + if g == nil { + return nil + } + + if err := k.SimpleSchema.FromGnostic(g); err != nil { + return err + } + + if err := k.CommonValidations.FromGnostic(g); err != nil { + return err + } + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return err + } + + return nil +} + +func (k *VendorExtensible) FromGnostic(g []*openapi_v2.NamedAny) error { + if len(g) == 0 { + return nil + } + + k.Extensions = make(Extensions, len(g)) + for _, v := range g { + if v == nil { + continue + } + + if v.Value == nil { + k.Extensions[v.Name] = nil + continue + } + + var iface interface{} + if err := v.Value.ToRawInfo().Decode(&iface); err != nil { + return err + } else { + k.Extensions[v.Name] = iface + } + } + return nil +} + +func (k *Refable) FromGnostic(g string) error { + return k.Ref.FromGnostic(g) +} + +func (k *Ref) FromGnostic(g string) error { + if g == "" { + return nil + } + + ref, err := jsonreference.New(g) + if err != nil { + return err + } + + *k = Ref{ + Ref: ref, + } + + return nil +} + +// Converts a gnostic v2 Document to a kube-openapi Swagger Document +// +// Caveats: +// +// - gnostic v2 documents treats zero as unspecified for numerical fields of +// CommonValidations fields such as Maximum, Minimum, MaximumItems, etc. +// There will always be data loss if one of the values of these fields is set to zero. +// +// Returns: +// +// - `ok`: `false` if a value was present in the gnostic document which cannot be +// roundtripped into kube-openapi types. In these instances, `ok` is set to +// `false` and the value is skipped. +// +// - `err`: an unexpected error occurred in the conversion from the gnostic type +// to kube-openapi type. 
+func (k *Swagger) FromGnostic(g *openapi_v2.Document) (ok bool, err error) { + ok = true + if g == nil { + return true, nil + } + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + + if nok, err := k.SwaggerProps.FromGnostic(g); err != nil { + return false, err + } else if !nok { + ok = false + } + + return ok, nil +} + +func (k *SwaggerProps) FromGnostic(g *openapi_v2.Document) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + + // openapi_v2.Document does not support "ID" field, so it will not be + // included + k.Consumes = g.Consumes + k.Produces = g.Produces + k.Schemes = g.Schemes + k.Swagger = g.Swagger + + if g.Info != nil { + k.Info = &Info{} + if nok, err := k.Info.FromGnostic(g.Info); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + k.Host = g.Host + k.BasePath = g.BasePath + + if g.Paths != nil { + k.Paths = &Paths{} + if nok, err := k.Paths.FromGnostic(g.Paths); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Definitions != nil { + k.Definitions = make(Definitions, len(g.Definitions.AdditionalProperties)) + for _, v := range g.Definitions.AdditionalProperties { + if v == nil { + continue + } + converted := Schema{} + if nok, err := converted.FromGnostic(v.Value); err != nil { + return false, err + } else if !nok { + ok = false + } + k.Definitions[v.Name] = converted + + } + } + + if g.Parameters != nil { + k.Parameters = make( + map[string]Parameter, + len(g.Parameters.AdditionalProperties)) + for _, v := range g.Parameters.AdditionalProperties { + if v == nil { + continue + } + p := Parameter{} + if nok, err := p.FromGnostic(v.Value); err != nil { + return false, err + } else if !nok { + ok = false + } + + k.Parameters[v.Name] = p + } + } + + if g.Responses != nil { + k.Responses = make( + map[string]Response, + len(g.Responses.AdditionalProperties)) + + for _, v := range g.Responses.AdditionalProperties { + if v == nil { + continue + } + p := Response{} + if nok, err := p.FromGnostic(v.Value); err != nil { + return false, err + } else if !nok { + ok = false + } + + k.Responses[v.Name] = p + } + } + + if g.SecurityDefinitions != nil { + k.SecurityDefinitions = make(SecurityDefinitions) + if err := k.SecurityDefinitions.FromGnostic(g.SecurityDefinitions); err != nil { + return false, err + } + } + + if g.Security != nil { + k.Security = make([]map[string][]string, len(g.Security)) + for i, v := range g.Security { + if v == nil || v.AdditionalProperties == nil { + continue + } + + k.Security[i] = make(map[string][]string, len(v.AdditionalProperties)) + converted := k.Security[i] + for _, p := range v.AdditionalProperties { + if p == nil { + continue + } + if p.Value != nil { + converted[p.Name] = p.Value.Value + } else { + converted[p.Name] = nil + } + } + } + } + + if g.Tags != nil { + k.Tags = make([]Tag, len(g.Tags)) + for i, v := range g.Tags { + if v == nil { + continue + } else if nok, err := k.Tags[i].FromGnostic(v); err != nil { + return false, err + } else if !nok { + ok = false + } + } + } + + if g.ExternalDocs != nil { + k.ExternalDocs = &ExternalDocumentation{} + if nok, err := k.ExternalDocs.FromGnostic(g.ExternalDocs); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + return ok, nil +} + +// Info + +func (k *Info) FromGnostic(g *openapi_v2.Info) (ok bool, err error) { + ok = true + if g == nil { + return true, nil + } + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + 
return false, err + } + + if nok, err := k.InfoProps.FromGnostic(g); err != nil { + return false, err + } else if !nok { + ok = false + } + + return ok, nil +} + +func (k *InfoProps) FromGnostic(g *openapi_v2.Info) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + + k.Description = g.Description + k.Title = g.Title + k.TermsOfService = g.TermsOfService + + if g.Contact != nil { + k.Contact = &ContactInfo{} + + if nok, err := k.Contact.FromGnostic(g.Contact); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.License != nil { + k.License = &License{} + if nok, err := k.License.FromGnostic(g.License); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + k.Version = g.Version + return ok, nil +} + +func (k *License) FromGnostic(g *openapi_v2.License) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + + k.Name = g.Name + k.URL = g.Url + + // License does not embed to VendorExtensible! + // data loss from g.VendorExtension + if len(g.VendorExtension) != 0 { + ok = false + } + + return ok, nil +} + +func (k *ContactInfo) FromGnostic(g *openapi_v2.Contact) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + + k.Name = g.Name + k.URL = g.Url + k.Email = g.Email + + // ContactInfo does not embed to VendorExtensible! + // data loss from g.VendorExtension + if len(g.VendorExtension) != 0 { + ok = false + } + + return ok, nil +} + +// Paths + +func (k *Paths) FromGnostic(g *openapi_v2.Paths) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + + if g.Path != nil { + k.Paths = make(map[string]PathItem, len(g.Path)) + for _, v := range g.Path { + if v == nil { + continue + } + + converted := PathItem{} + if nok, err := converted.FromGnostic(v.Value); err != nil { + return false, err + } else if !nok { + ok = false + } + + k.Paths[v.Name] = converted + } + } + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + return ok, nil +} + +func (k *PathItem) FromGnostic(g *openapi_v2.PathItem) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + + if nok, err := k.PathItemProps.FromGnostic(g); err != nil { + return false, err + } else if !nok { + ok = false + } + + if err := k.Refable.FromGnostic(g.XRef); err != nil { + return false, err + } + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + return ok, nil +} + +func (k *PathItemProps) FromGnostic(g *openapi_v2.PathItem) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + if g.Get != nil { + k.Get = &Operation{} + if nok, err := k.Get.FromGnostic(g.Get); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Put != nil { + k.Put = &Operation{} + if nok, err := k.Put.FromGnostic(g.Put); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Post != nil { + k.Post = &Operation{} + if nok, err := k.Post.FromGnostic(g.Post); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Delete != nil { + k.Delete = &Operation{} + if nok, err := k.Delete.FromGnostic(g.Delete); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Options != nil { + k.Options = &Operation{} + if nok, err := k.Options.FromGnostic(g.Options); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Head != nil { + k.Head = &Operation{} + if nok, err := 
k.Head.FromGnostic(g.Head); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Patch != nil { + k.Patch = &Operation{} + if nok, err := k.Patch.FromGnostic(g.Patch); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Parameters != nil { + k.Parameters = make([]Parameter, len(g.Parameters)) + for i, v := range g.Parameters { + if v == nil { + continue + } else if nok, err := k.Parameters[i].FromGnosticParametersItem(v); err != nil { + return false, err + } else if !nok { + ok = false + } + } + } + + return ok, nil +} + +func (k *Operation) FromGnostic(g *openapi_v2.Operation) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + + if nok, err := k.OperationProps.FromGnostic(g); err != nil { + return false, err + } else if !nok { + ok = false + } + + return ok, nil +} + +func (k *OperationProps) FromGnostic(g *openapi_v2.Operation) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + + k.Description = g.Description + k.Consumes = g.Consumes + k.Produces = g.Produces + k.Schemes = g.Schemes + k.Tags = g.Tags + k.Summary = g.Summary + + if g.ExternalDocs != nil { + k.ExternalDocs = &ExternalDocumentation{} + if nok, err := k.ExternalDocs.FromGnostic(g.ExternalDocs); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + k.ID = g.OperationId + k.Deprecated = g.Deprecated + + if g.Security != nil { + k.Security = make([]map[string][]string, len(g.Security)) + for i, v := range g.Security { + if v == nil || v.AdditionalProperties == nil { + continue + } + + k.Security[i] = make(map[string][]string, len(v.AdditionalProperties)) + converted := k.Security[i] + for _, p := range v.AdditionalProperties { + if p == nil { + continue + } + + if p.Value != nil { + converted[p.Name] = p.Value.Value + } else { + converted[p.Name] = nil + } + } + } + } + + if g.Parameters != nil { + k.Parameters = make([]Parameter, len(g.Parameters)) + for i, v := range g.Parameters { + if v == nil { + continue + } else if nok, err := k.Parameters[i].FromGnosticParametersItem(v); err != nil { + return false, err + } else if !nok { + ok = false + } + } + } + + if g.Responses != nil { + k.Responses = &Responses{} + if nok, err := k.Responses.FromGnostic(g.Responses); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + return ok, nil +} + +// Responses + +func (k *Responses) FromGnostic(g *openapi_v2.Responses) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + + if nok, err := k.ResponsesProps.FromGnostic(g); err != nil { + return false, err + } else if !nok { + ok = false + } + + return ok, nil +} + +func (k *ResponsesProps) FromGnostic(g *openapi_v2.Responses) (ok bool, err error) { + if g == nil { + return true, nil + } else if g.ResponseCode == nil { + return ok, nil + } + + ok = true + for _, v := range g.ResponseCode { + if v == nil { + continue + } + if v.Name == "default" { + k.Default = &Response{} + if nok, err := k.Default.FromGnosticResponseValue(v.Value); err != nil { + return false, err + } else if !nok { + ok = false + } + } else if nk, err := strconv.Atoi(v.Name); err != nil { + // This should actually never fail, unless gnostic struct was + // manually/purposefully tampered with at runtime. 
+ // Gnostic's ParseDocument validates that all StatusCodeResponses + // keys adhere to the following regex ^([0-9]{3})$|^(default)$ + ok = false + } else { + if k.StatusCodeResponses == nil { + k.StatusCodeResponses = map[int]Response{} + } + + res := Response{} + if nok, err := res.FromGnosticResponseValue(v.Value); err != nil { + return false, err + } else if !nok { + ok = false + } + k.StatusCodeResponses[nk] = res + } + } + + return ok, nil +} + +func (k *Response) FromGnostic(g *openapi_v2.Response) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + // Refable case handled in FromGnosticResponseValue + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + + if nok, err := k.ResponseProps.FromGnostic(g); err != nil { + return false, err + } else if !nok { + ok = false + } + + return ok, nil +} + +func (k *Response) FromGnosticResponseValue(g *openapi_v2.ResponseValue) (ok bool, err error) { + ok = true + if ref := g.GetJsonReference(); ref != nil { + k.Description = ref.Description + + if err := k.Refable.FromGnostic(ref.XRef); err != nil { + return false, err + } + } else if nok, err := k.FromGnostic(g.GetResponse()); err != nil { + return false, err + } else if !nok { + ok = false + } + + return ok, nil +} + +func (k *ResponseProps) FromGnostic(g *openapi_v2.Response) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + k.Description = g.Description + + if g.Schema != nil { + k.Schema = &Schema{} + if nok, err := k.Schema.FromGnosticSchemaItem(g.Schema); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Headers != nil { + k.Headers = make(map[string]Header, len(g.Headers.AdditionalProperties)) + for _, v := range g.Headers.AdditionalProperties { + if v == nil { + continue + } + + converted := Header{} + if err := converted.FromGnostic(v.GetValue()); err != nil { + return false, err + } + + k.Headers[v.Name] = converted + } + } + + if g.Examples != nil { + k.Examples = make(map[string]interface{}, len(g.Examples.AdditionalProperties)) + for _, v := range g.Examples.AdditionalProperties { + if v == nil { + continue + } else if v.Value == nil { + k.Examples[v.Name] = nil + continue + } + + var iface interface{} + if err := v.Value.ToRawInfo().Decode(&iface); err != nil { + return false, err + } else { + k.Examples[v.Name] = iface + } + } + } + + return ok, nil +} + +// Header + +func (k *Header) FromGnostic(g *openapi_v2.Header) (err error) { + if g == nil { + return nil + } + + if err := k.CommonValidations.FromGnostic(g); err != nil { + return err + } + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return err + } + + if err := k.SimpleSchema.FromGnostic(g); err != nil { + return err + } + + if err := k.HeaderProps.FromGnostic(g); err != nil { + return err + } + + return nil +} + +func (k *HeaderProps) FromGnostic(g *openapi_v2.Header) error { + if g == nil { + return nil + } + + // All other fields of openapi_v2.Header are handled by + // the embeded fields, commonvalidations, etc. 
+ k.Description = g.Description + return nil +} + +// Parameters + +func (k *Parameter) FromGnostic(g *openapi_v2.Parameter) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + switch p := g.Oneof.(type) { + case *openapi_v2.Parameter_BodyParameter: + if nok, err := k.ParamProps.FromGnostic(p.BodyParameter); err != nil { + return false, err + } else if !nok { + ok = false + } + + if err := k.VendorExtensible.FromGnostic(p.BodyParameter.GetVendorExtension()); err != nil { + return false, err + } + + return ok, nil + case *openapi_v2.Parameter_NonBodyParameter: + switch nb := g.GetNonBodyParameter().Oneof.(type) { + case *openapi_v2.NonBodyParameter_HeaderParameterSubSchema: + if nok, err := k.ParamProps.FromGnostic(nb.HeaderParameterSubSchema); err != nil { + return false, err + } else if !nok { + ok = false + } + + if err := k.SimpleSchema.FromGnostic(nb.HeaderParameterSubSchema); err != nil { + return false, err + } + + if err := k.CommonValidations.FromGnostic(nb.HeaderParameterSubSchema); err != nil { + return false, err + } + + if err := k.VendorExtensible.FromGnostic(nb.HeaderParameterSubSchema.GetVendorExtension()); err != nil { + return false, err + } + + return ok, nil + case *openapi_v2.NonBodyParameter_FormDataParameterSubSchema: + if nok, err := k.ParamProps.FromGnostic(nb.FormDataParameterSubSchema); err != nil { + return false, err + } else if !nok { + ok = false + } + + if err := k.SimpleSchema.FromGnostic(nb.FormDataParameterSubSchema); err != nil { + return false, err + } + + if err := k.CommonValidations.FromGnostic(nb.FormDataParameterSubSchema); err != nil { + return false, err + } + + if err := k.VendorExtensible.FromGnostic(nb.FormDataParameterSubSchema.GetVendorExtension()); err != nil { + return false, err + } + + return ok, nil + case *openapi_v2.NonBodyParameter_QueryParameterSubSchema: + if nok, err := k.ParamProps.FromGnostic(nb.QueryParameterSubSchema); err != nil { + return false, err + } else if !nok { + ok = false + } + + if err := k.SimpleSchema.FromGnostic(nb.QueryParameterSubSchema); err != nil { + return false, err + } + + if err := k.CommonValidations.FromGnostic(nb.QueryParameterSubSchema); err != nil { + return false, err + } + + if err := k.VendorExtensible.FromGnostic(nb.QueryParameterSubSchema.GetVendorExtension()); err != nil { + return false, err + } + + return ok, nil + case *openapi_v2.NonBodyParameter_PathParameterSubSchema: + if nok, err := k.ParamProps.FromGnostic(nb.PathParameterSubSchema); err != nil { + return false, err + } else if !nok { + ok = false + } + + if err := k.SimpleSchema.FromGnostic(nb.PathParameterSubSchema); err != nil { + return false, err + } + + if err := k.CommonValidations.FromGnostic(nb.PathParameterSubSchema); err != nil { + return false, err + } + + if err := k.VendorExtensible.FromGnostic(nb.PathParameterSubSchema.GetVendorExtension()); err != nil { + return false, err + } + + return ok, nil + default: + return false, errors.New("unrecognized nonbody type for Parameter") + } + default: + return false, errors.New("unrecognized type for Parameter") + } +} + +type GnosticCommonParamProps interface { + GetName() string + GetRequired() bool + GetIn() string + GetDescription() string +} + +type GnosticCommonParamPropsBodyParameter interface { + GetSchema() *openapi_v2.Schema +} + +type GnosticCommonParamPropsFormData interface { + GetAllowEmptyValue() bool +} + +func (k *ParamProps) FromGnostic(g GnosticCommonParamProps) (ok bool, err error) { + ok = true + k.Description = g.GetDescription() + 
k.In = g.GetIn() + k.Name = g.GetName() + k.Required = g.GetRequired() + + if formDataParameter, success := g.(GnosticCommonParamPropsFormData); success { + k.AllowEmptyValue = formDataParameter.GetAllowEmptyValue() + } + + if bodyParameter, success := g.(GnosticCommonParamPropsBodyParameter); success { + if bodyParameter.GetSchema() != nil { + k.Schema = &Schema{} + if nok, err := k.Schema.FromGnostic(bodyParameter.GetSchema()); err != nil { + return false, err + } else if !nok { + ok = false + } + } + } + + return ok, nil +} + +// PB types use a different structure than we do for "refable". For PB, there is +// a wrappign oneof type that could be a ref or the type +func (k *Parameter) FromGnosticParametersItem(g *openapi_v2.ParametersItem) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + if ref := g.GetJsonReference(); ref != nil { + k.Description = ref.Description + + if err := k.Refable.FromGnostic(ref.XRef); err != nil { + return false, err + } + } else if nok, err := k.FromGnostic(g.GetParameter()); err != nil { + return false, err + } else if !nok { + ok = false + } + + return ok, nil +} + +// Schema + +func (k *Schema) FromGnostic(g *openapi_v2.Schema) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + + // SwaggerSchemaProps + k.Discriminator = g.Discriminator + k.ReadOnly = g.ReadOnly + k.Description = g.Description + if g.ExternalDocs != nil { + k.ExternalDocs = &ExternalDocumentation{} + if nok, err := k.ExternalDocs.FromGnostic(g.ExternalDocs); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Example != nil { + if err := g.Example.ToRawInfo().Decode(&k.Example); err != nil { + return false, err + } + } + + // SchemaProps + if err := k.Ref.FromGnostic(g.XRef); err != nil { + return false, err + } + k.Type = g.Type.GetValue() + k.Format = g.GetFormat() + k.Title = g.GetTitle() + + // These below fields are not available in gnostic types, so will never + // be populated. This means roundtrips which make use of these + // (non-official, kube-only) fields will lose information. + // + // Schema.ID is not available in official spec + // Schema.$schema + // Schema.Nullable - in openapiv3, not v2 + // Schema.AnyOf - in openapiv3, not v2 + // Schema.OneOf - in openapiv3, not v2 + // Schema.Not - in openapiv3, not v2 + // Schema.PatternProperties - in openapiv3, not v2 + // Schema.Dependencies - in openapiv3, not v2 + // Schema.AdditionalItems + // Schema.Definitions - not part of spec + // Schema.ExtraProps - gnostic parser rejects any keys it does not recognize + + if g.GetDefault() != nil { + if err := g.GetDefault().ToRawInfo().Decode(&k.Default); err != nil { + return false, err + } + } + + // These conditionals (!= 0) follow gnostic's logic for ToRawInfo + // The keys in gnostic source are only included if nonzero. 
+ + if g.Maximum != 0.0 { + k.Maximum = &g.Maximum + } + + if g.Minimum != 0.0 { + k.Minimum = &g.Minimum + } + + k.ExclusiveMaximum = g.ExclusiveMaximum + k.ExclusiveMinimum = g.ExclusiveMinimum + + if g.MaxLength != 0 { + k.MaxLength = &g.MaxLength + } + + if g.MinLength != 0 { + k.MinLength = &g.MinLength + } + + k.Pattern = g.GetPattern() + + if g.MaxItems != 0 { + k.MaxItems = &g.MaxItems + } + + if g.MinItems != 0 { + k.MinItems = &g.MinItems + } + k.UniqueItems = g.UniqueItems + + if g.MultipleOf != 0 { + k.MultipleOf = &g.MultipleOf + } + + for _, v := range g.GetEnum() { + if v == nil { + continue + } + + var convert interface{} + if err := v.ToRawInfo().Decode(&convert); err != nil { + return false, err + } + k.Enum = append(k.Enum, convert) + } + + if g.MaxProperties != 0 { + k.MaxProperties = &g.MaxProperties + } + + if g.MinProperties != 0 { + k.MinProperties = &g.MinProperties + } + + k.Required = g.Required + + if g.GetItems() != nil { + k.Items = &SchemaOrArray{} + for _, v := range g.Items.GetSchema() { + if v == nil { + continue + } + + schema := Schema{} + if nok, err := schema.FromGnostic(v); err != nil { + return false, err + } else if !nok { + ok = false + } + k.Items.Schemas = append(k.Items.Schemas, schema) + } + + if len(k.Items.Schemas) == 1 { + k.Items.Schema = &k.Items.Schemas[0] + k.Items.Schemas = nil + } + } + + for i, v := range g.GetAllOf() { + if v == nil { + continue + } + + k.AllOf = append(k.AllOf, Schema{}) + if nok, err := k.AllOf[i].FromGnostic(v); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if g.Properties != nil { + k.Properties = make(map[string]Schema) + for _, namedSchema := range g.Properties.AdditionalProperties { + if namedSchema == nil { + continue + } + val := &Schema{} + if nok, err := val.FromGnostic(namedSchema.Value); err != nil { + return false, err + } else if !nok { + ok = false + } + + k.Properties[namedSchema.Name] = *val + } + } + + if g.AdditionalProperties != nil { + k.AdditionalProperties = &SchemaOrBool{} + if g.AdditionalProperties.GetSchema() == nil { + k.AdditionalProperties.Allows = g.AdditionalProperties.GetBoolean() + } else { + k.AdditionalProperties.Schema = &Schema{} + k.AdditionalProperties.Allows = true + + if nok, err := k.AdditionalProperties.Schema.FromGnostic(g.AdditionalProperties.GetSchema()); err != nil { + return false, err + } else if !nok { + ok = false + } + } + } + + return ok, nil +} + +func (k *Schema) FromGnosticSchemaItem(g *openapi_v2.SchemaItem) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + + switch p := g.Oneof.(type) { + case *openapi_v2.SchemaItem_FileSchema: + fileSchema := p.FileSchema + + if err := k.VendorExtensible.FromGnostic(fileSchema.VendorExtension); err != nil { + return false, err + } + + k.Format = fileSchema.Format + k.Title = fileSchema.Title + k.Description = fileSchema.Description + k.Required = fileSchema.Required + k.Type = []string{fileSchema.Type} + k.ReadOnly = fileSchema.ReadOnly + + if fileSchema.ExternalDocs != nil { + k.ExternalDocs = &ExternalDocumentation{} + if nok, err := k.ExternalDocs.FromGnostic(fileSchema.ExternalDocs); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + if fileSchema.Example != nil { + if err := fileSchema.Example.ToRawInfo().Decode(&k.Example); err != nil { + return false, err + } + } + + if fileSchema.Default != nil { + if err := fileSchema.Default.ToRawInfo().Decode(&k.Default); err != nil { + return false, err + } + } + + case 
*openapi_v2.SchemaItem_Schema: + schema := p.Schema + + if nok, err := k.FromGnostic(schema); err != nil { + return false, err + } else if !nok { + ok = false + } + default: + return false, errors.New("unrecognized type for SchemaItem") + } + + return ok, nil +} + +// SecurityDefinitions + +func (k SecurityDefinitions) FromGnostic(g *openapi_v2.SecurityDefinitions) error { + for _, v := range g.GetAdditionalProperties() { + if v == nil { + continue + } + secScheme := &SecurityScheme{} + if err := secScheme.FromGnostic(v.Value); err != nil { + return err + } + k[v.Name] = secScheme + } + + return nil +} + +type GnosticCommonSecurityDefinition interface { + GetType() string + GetDescription() string +} + +func (k *SecuritySchemeProps) FromGnostic(g GnosticCommonSecurityDefinition) error { + k.Type = g.GetType() + k.Description = g.GetDescription() + + if hasName, success := g.(interface{ GetName() string }); success { + k.Name = hasName.GetName() + } + + if hasIn, success := g.(interface{ GetIn() string }); success { + k.In = hasIn.GetIn() + } + + if hasFlow, success := g.(interface{ GetFlow() string }); success { + k.Flow = hasFlow.GetFlow() + } + + if hasAuthURL, success := g.(interface{ GetAuthorizationUrl() string }); success { + k.AuthorizationURL = hasAuthURL.GetAuthorizationUrl() + } + + if hasTokenURL, success := g.(interface{ GetTokenUrl() string }); success { + k.TokenURL = hasTokenURL.GetTokenUrl() + } + + if hasScopes, success := g.(interface { + GetScopes() *openapi_v2.Oauth2Scopes + }); success { + scopes := hasScopes.GetScopes() + if scopes != nil { + k.Scopes = make(map[string]string, len(scopes.AdditionalProperties)) + for _, v := range scopes.AdditionalProperties { + if v == nil { + continue + } + + k.Scopes[v.Name] = v.Value + } + } + } + + return nil +} + +func (k *SecurityScheme) FromGnostic(g *openapi_v2.SecurityDefinitionsItem) error { + if g == nil { + return nil + } + + switch s := g.Oneof.(type) { + case *openapi_v2.SecurityDefinitionsItem_ApiKeySecurity: + if err := k.SecuritySchemeProps.FromGnostic(s.ApiKeySecurity); err != nil { + return err + } + if err := k.VendorExtensible.FromGnostic(s.ApiKeySecurity.VendorExtension); err != nil { + return err + } + return nil + case *openapi_v2.SecurityDefinitionsItem_BasicAuthenticationSecurity: + if err := k.SecuritySchemeProps.FromGnostic(s.BasicAuthenticationSecurity); err != nil { + return err + } + if err := k.VendorExtensible.FromGnostic(s.BasicAuthenticationSecurity.VendorExtension); err != nil { + return err + } + return nil + case *openapi_v2.SecurityDefinitionsItem_Oauth2AccessCodeSecurity: + if err := k.SecuritySchemeProps.FromGnostic(s.Oauth2AccessCodeSecurity); err != nil { + return err + } + if err := k.VendorExtensible.FromGnostic(s.Oauth2AccessCodeSecurity.VendorExtension); err != nil { + return err + } + return nil + case *openapi_v2.SecurityDefinitionsItem_Oauth2ApplicationSecurity: + if err := k.SecuritySchemeProps.FromGnostic(s.Oauth2ApplicationSecurity); err != nil { + return err + } + if err := k.VendorExtensible.FromGnostic(s.Oauth2ApplicationSecurity.VendorExtension); err != nil { + return err + } + return nil + case *openapi_v2.SecurityDefinitionsItem_Oauth2ImplicitSecurity: + if err := k.SecuritySchemeProps.FromGnostic(s.Oauth2ImplicitSecurity); err != nil { + return err + } + if err := k.VendorExtensible.FromGnostic(s.Oauth2ImplicitSecurity.VendorExtension); err != nil { + return err + } + return nil + case *openapi_v2.SecurityDefinitionsItem_Oauth2PasswordSecurity: + if err := 
k.SecuritySchemeProps.FromGnostic(s.Oauth2PasswordSecurity); err != nil { + return err + } + if err := k.VendorExtensible.FromGnostic(s.Oauth2PasswordSecurity.VendorExtension); err != nil { + return err + } + return nil + default: + return errors.New("unrecognized SecurityDefinitionsItem") + } +} + +// Tag + +func (k *Tag) FromGnostic(g *openapi_v2.Tag) (ok bool, err error) { + if g == nil { + return true, nil + } + + ok = true + + if nok, err := k.TagProps.FromGnostic(g); err != nil { + return false, err + } else if !nok { + ok = false + } + + if err := k.VendorExtensible.FromGnostic(g.VendorExtension); err != nil { + return false, err + } + return ok, nil +} + +func (k *TagProps) FromGnostic(g *openapi_v2.Tag) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + k.Description = g.Description + k.Name = g.Name + + if g.ExternalDocs != nil { + k.ExternalDocs = &ExternalDocumentation{} + if nok, err := k.ExternalDocs.FromGnostic(g.ExternalDocs); err != nil { + return false, err + } else if !nok { + ok = false + } + } + + return ok, nil +} + +// ExternalDocumentation + +func (k *ExternalDocumentation) FromGnostic(g *openapi_v2.ExternalDocs) (ok bool, err error) { + if g == nil { + return true, nil + } + ok = true + k.Description = g.Description + k.URL = g.Url + + // data loss! g.VendorExtension + if len(g.VendorExtension) != 0 { + ok = false + } + + return ok, nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go index 597fc9631..05310c46b 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go @@ -18,6 +18,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) const ( @@ -41,6 +43,9 @@ type Header struct { // MarshalJSON marshal this to JSON func (h Header) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(h) + } b1, err := json.Marshal(h.CommonValidations) if err != nil { return nil, err @@ -60,8 +65,26 @@ func (h Header) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3, b4), nil } +func (h Header) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + CommonValidations commonValidationsOmitZero `json:",inline"` + SimpleSchema simpleSchemaOmitZero `json:",inline"` + Extensions + HeaderProps + } + x.CommonValidations = commonValidationsOmitZero(h.CommonValidations) + x.SimpleSchema = simpleSchemaOmitZero(h.SimpleSchema) + x.Extensions = internal.SanitizeExtensions(h.Extensions) + x.HeaderProps = h.HeaderProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON unmarshals this header from JSON func (h *Header) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, h) + } + if err := json.Unmarshal(data, &h.CommonValidations); err != nil { return err } @@ -73,3 +96,23 @@ func (h *Header) UnmarshalJSON(data []byte) error { } return json.Unmarshal(data, &h.HeaderProps) } + +func (h *Header) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + CommonValidations + SimpleSchema + Extensions + HeaderProps + } + + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + + h.CommonValidations = x.CommonValidations + h.SimpleSchema = x.SimpleSchema + h.Extensions = 
internal.SanitizeExtensions(x.Extensions) + h.HeaderProps = x.HeaderProps + + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go index 51a2f5781..d667b705b 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go @@ -19,6 +19,8 @@ import ( "strings" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) // Extensions vendor specific extensions @@ -87,6 +89,19 @@ func (e Extensions) GetObject(key string, out interface{}) error { return nil } +func (e Extensions) sanitizeWithExtra() (extra map[string]any) { + for k, v := range e { + if !internal.IsExtensionKey(k) { + if extra == nil { + extra = make(map[string]any) + } + extra[k] = v + delete(e, k) + } + } + return extra +} + // VendorExtensible composition block. type VendorExtensible struct { Extensions Extensions @@ -154,6 +169,9 @@ type Info struct { // MarshalJSON marshal this to JSON func (i Info) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(i) + } b1, err := json.Marshal(i.InfoProps) if err != nil { return nil, err @@ -165,10 +183,37 @@ func (i Info) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (i Info) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + InfoProps + } + x.Extensions = i.Extensions + x.InfoProps = i.InfoProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON marshal this from JSON func (i *Info) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, i) + } + if err := json.Unmarshal(data, &i.InfoProps); err != nil { return err } return json.Unmarshal(data, &i.VendorExtensible) } + +func (i *Info) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + Extensions + InfoProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + i.Extensions = internal.SanitizeExtensions(x.Extensions) + i.InfoProps = x.InfoProps + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go index b75aefe16..4132467d2 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go @@ -18,6 +18,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) const ( @@ -35,6 +37,18 @@ type SimpleSchema struct { Example interface{} `json:"example,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). 
+type simpleSchemaOmitZero struct { + Type string `json:"type,omitempty"` + Nullable bool `json:"nullable,omitzero"` + Format string `json:"format,omitempty"` + Items *Items `json:"items,omitzero"` + CollectionFormat string `json:"collectionFormat,omitempty"` + Default interface{} `json:"default,omitempty"` + Example interface{} `json:"example,omitempty"` +} + // CommonValidations describe common JSON-schema validations type CommonValidations struct { Maximum *float64 `json:"maximum,omitempty"` @@ -51,6 +65,23 @@ type CommonValidations struct { Enum []interface{} `json:"enum,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type commonValidationsOmitZero struct { + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitzero"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitzero"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitzero"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` +} + // Items a limited subset of JSON-Schema's items object. // It is used by parameter definitions that are not located in "body". // @@ -64,6 +95,10 @@ type Items struct { // UnmarshalJSON hydrates this items instance with the data from JSON func (i *Items) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, i) + } + var validations CommonValidations if err := json.Unmarshal(data, &validations); err != nil { return err @@ -87,8 +122,30 @@ func (i *Items) UnmarshalJSON(data []byte) error { return nil } +func (i *Items) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + CommonValidations + SimpleSchema + Extensions + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := i.Refable.Ref.fromMap(x.Extensions); err != nil { + return err + } + + i.CommonValidations = x.CommonValidations + i.SimpleSchema = x.SimpleSchema + i.Extensions = internal.SanitizeExtensions(x.Extensions) + return nil +} + // MarshalJSON converts this items object to JSON func (i Items) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(i) + } b1, err := json.Marshal(i.CommonValidations) if err != nil { return nil, err @@ -107,3 +164,17 @@ func (i Items) MarshalJSON() ([]byte, error) { } return swag.ConcatJSON(b4, b3, b1, b2), nil } + +func (i Items) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + CommonValidations commonValidationsOmitZero `json:",inline"` + SimpleSchema simpleSchemaOmitZero `json:",inline"` + Ref string `json:"$ref,omitempty"` + Extensions + } + x.CommonValidations = commonValidationsOmitZero(i.CommonValidations) + x.SimpleSchema = simpleSchemaOmitZero(i.SimpleSchema) + x.Ref = i.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(i.Extensions) + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go index c7acd8672..63eed3460 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go +++ 
b/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go @@ -18,6 +18,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) // OperationProps describes an operation @@ -40,6 +42,23 @@ type OperationProps struct { Responses *Responses `json:"responses,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type operationPropsOmitZero struct { + Description string `json:"description,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Tags []string `json:"tags,omitempty"` + Summary string `json:"summary,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"` + ID string `json:"operationId,omitempty"` + Deprecated bool `json:"deprecated,omitempty,omitzero"` + Security []map[string][]string `json:"security,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` + Responses *Responses `json:"responses,omitzero"` +} + // MarshalJSON takes care of serializing operation properties to JSON // // We use a custom marhaller here to handle a special cases related to @@ -75,14 +94,35 @@ type Operation struct { // UnmarshalJSON hydrates this items instance with the data from JSON func (o *Operation) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, o) + } + if err := json.Unmarshal(data, &o.OperationProps); err != nil { return err } return json.Unmarshal(data, &o.VendorExtensible) } +func (o *Operation) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + type OperationPropsNoMethods OperationProps // strip MarshalJSON method + var x struct { + Extensions + OperationPropsNoMethods + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + o.Extensions = internal.SanitizeExtensions(x.Extensions) + o.OperationProps = OperationProps(x.OperationPropsNoMethods) + return nil +} + // MarshalJSON converts this items object to JSON func (o Operation) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(o) + } b1, err := json.Marshal(o.OperationProps) if err != nil { return nil, err @@ -94,3 +134,13 @@ func (o Operation) MarshalJSON() ([]byte, error) { concated := swag.ConcatJSON(b1, b2) return concated, nil } + +func (o Operation) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + OperationProps operationPropsOmitZero `json:",inline"` + } + x.Extensions = internal.SanitizeExtensions(o.Extensions) + x.OperationProps = operationPropsOmitZero(o.OperationProps) + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go index 218513974..53d1e0aa9 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go @@ -18,6 +18,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) // ParamProps describes the specific attributes of an operation parameter @@ -34,30 +36,46 @@ type ParamProps struct { AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` } +// Marshaling 
structure only, always edit along with corresponding +// struct (or compilation will fail). +type paramPropsOmitZero struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + In string `json:"in,omitempty"` + Required bool `json:"required,omitzero"` + Schema *Schema `json:"schema,omitzero"` + AllowEmptyValue bool `json:"allowEmptyValue,omitzero"` +} + // Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). // // There are five possible parameter types. // * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part -// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, -// the path parameter is `itemId`. +// +// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, +// the path parameter is `itemId`. +// // * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. // * Header - Custom headers that are expected as part of the request. // * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be -// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for -// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist -// together for the same operation. +// +// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for +// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist +// together for the same operation. +// // * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or -// `multipart/form-data` are used as the content type of the request (in Swagger's definition, -// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used -// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be -// declared together with a body parameter for the same operation. Form parameters have a different format based on -// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). -// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. -// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple -// parameters that are being transferred. -// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. -// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is -// `submit-name`. This type of form parameters is more commonly used for file transfers. +// +// `multipart/form-data` are used as the content type of the request (in Swagger's definition, +// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used +// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be +// declared together with a body parameter for the same operation. 
Form parameters have a different format based on +// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). +// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. +// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple +// parameters that are being transferred. +// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. +// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is +// `submit-name`. This type of form parameters is more commonly used for file transfers. // // For more information: http://goo.gl/8us55a#parameterObject type Parameter struct { @@ -70,6 +88,10 @@ type Parameter struct { // UnmarshalJSON hydrates this items instance with the data from JSON func (p *Parameter) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, p) + } + if err := json.Unmarshal(data, &p.CommonValidations); err != nil { return err } @@ -85,8 +107,31 @@ func (p *Parameter) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, &p.ParamProps) } +func (p *Parameter) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + CommonValidations + SimpleSchema + Extensions + ParamProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := p.Refable.Ref.fromMap(x.Extensions); err != nil { + return err + } + p.CommonValidations = x.CommonValidations + p.SimpleSchema = x.SimpleSchema + p.Extensions = internal.SanitizeExtensions(x.Extensions) + p.ParamProps = x.ParamProps + return nil +} + // MarshalJSON converts this items object to JSON func (p Parameter) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(p) + } b1, err := json.Marshal(p.CommonValidations) if err != nil { return nil, err @@ -109,3 +154,19 @@ func (p Parameter) MarshalJSON() ([]byte, error) { } return swag.ConcatJSON(b3, b1, b2, b4, b5), nil } + +func (p Parameter) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + CommonValidations commonValidationsOmitZero `json:",inline"` + SimpleSchema simpleSchemaOmitZero `json:",inline"` + ParamProps paramPropsOmitZero `json:",inline"` + Ref string `json:"$ref,omitempty"` + Extensions + } + x.CommonValidations = commonValidationsOmitZero(p.CommonValidations) + x.SimpleSchema = simpleSchemaOmitZero(p.SimpleSchema) + x.Extensions = internal.SanitizeExtensions(p.Extensions) + x.ParamProps = paramPropsOmitZero(p.ParamProps) + x.Ref = p.Refable.Ref.String() + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go index 04de58f00..1d1588cb9 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go @@ -18,6 +18,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) // PathItemProps the path item specific properties @@ -46,6 +48,10 @@ type PathItem struct { // UnmarshalJSON hydrates this items instance with the data from JSON func (p *PathItem) UnmarshalJSON(data []byte) error { + if 
internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, p) + } + if err := json.Unmarshal(data, &p.Refable); err != nil { return err } @@ -55,8 +61,29 @@ func (p *PathItem) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, &p.PathItemProps) } +func (p *PathItem) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + Extensions + PathItemProps + } + + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + if err := p.Refable.Ref.fromMap(x.Extensions); err != nil { + return err + } + p.Extensions = internal.SanitizeExtensions(x.Extensions) + p.PathItemProps = x.PathItemProps + + return nil +} + // MarshalJSON converts this items object to JSON func (p PathItem) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(p) + } b3, err := json.Marshal(p.Refable) if err != nil { return nil, err @@ -72,3 +99,15 @@ func (p PathItem) MarshalJSON() ([]byte, error) { concated := swag.ConcatJSON(b3, b4, b5) return concated, nil } + +func (p PathItem) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + Extensions + PathItemProps + } + x.Ref = p.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(p.Extensions) + x.PathItemProps = p.PathItemProps + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go index 319aba879..18f6a9f42 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go @@ -16,9 +16,12 @@ package spec import ( "encoding/json" + "fmt" "strings" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) // Paths holds the relative paths to the individual endpoints. 
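A minimal sketch of the pattern recurring through these vendored spec files: each type gains a gated fast path (internal.UseOptimizedJSONMarshaling / ...Unmarshaling) plus a MarshalNextJSON/UnmarshalNextJSON pair for the go-json-experiment encoder, with extensions sanitized to "x-" keys. The toy below reproduces only that shape with the standard library; useOptimized and toyHeader are invented stand-ins, not the kube-openapi internals.

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// useOptimized stands in for internal.UseOptimizedJSONMarshaling; toyHeader
// stands in for the spec types in the hunks above. Both are sketch-only names.
var useOptimized = true

type toyHeader struct {
	Description string
	Extensions  map[string]any
}

// MarshalJSON mirrors the gate: take the new path when the flag is set,
// fall back to the legacy encoding/json path otherwise.
func (h toyHeader) MarshalJSON() ([]byte, error) {
	if useOptimized {
		return h.marshalNext()
	}
	return json.Marshal(map[string]any{"description": h.Description})
}

// marshalNext folds sanitized "x-" extensions and the fixed fields into one
// object, the same idea as SanitizeExtensions + MarshalNextJSON above.
func (h toyHeader) marshalNext() ([]byte, error) {
	m := map[string]any{}
	for k, v := range h.Extensions {
		if strings.HasPrefix(k, "x-") { // drop non-extension keys
			m[k] = v
		}
	}
	if h.Description != "" {
		m["description"] = h.Description
	}
	return json.Marshal(m)
}

func main() {
	h := toyHeader{Description: "demo", Extensions: map[string]any{"x-a": 1, "skip": 2}}
	b, _ := json.Marshal(h)
	fmt.Println(string(b)) // {"description":"demo","x-a":1}
}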
@@ -34,6 +37,10 @@ type Paths struct { // UnmarshalJSON hydrates this items instance with the data from JSON func (p *Paths) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, p) + } + var res map[string]json.RawMessage if err := json.Unmarshal(data, &res); err != nil { return err @@ -63,8 +70,65 @@ func (p *Paths) UnmarshalJSON(data []byte) error { return nil } +func (p *Paths) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + tok, err := dec.ReadToken() + if err != nil { + return err + } + var ext any + var pi PathItem + switch k := tok.Kind(); k { + case 'n': + return nil // noop + case '{': + for { + tok, err := dec.ReadToken() + if err != nil { + return err + } + + if tok.Kind() == '}' { + return nil + } + + switch k := tok.String(); { + case internal.IsExtensionKey(k): + ext = nil + if err := opts.UnmarshalNext(dec, &ext); err != nil { + return err + } + + if p.Extensions == nil { + p.Extensions = make(map[string]any) + } + p.Extensions[k] = ext + case len(k) > 0 && k[0] == '/': + pi = PathItem{} + if err := opts.UnmarshalNext(dec, &pi); err != nil { + return err + } + + if p.Paths == nil { + p.Paths = make(map[string]PathItem) + } + p.Paths[k] = pi + default: + _, err := dec.ReadValue() // skip value + if err != nil { + return err + } + } + } + default: + return fmt.Errorf("unknown JSON kind: %v", k) + } +} + // MarshalJSON converts this items object to JSON func (p Paths) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(p) + } b1, err := json.Marshal(p.VendorExtensible) if err != nil { return nil, err @@ -83,3 +147,18 @@ func (p Paths) MarshalJSON() ([]byte, error) { concated := swag.ConcatJSON(b1, b2) return concated, nil } + +func (p Paths) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + m := make(map[string]any, len(p.Extensions)+len(p.Paths)) + for k, v := range p.Extensions { + if internal.IsExtensionKey(k) { + m[k] = v + } + } + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + m[k] = v + } + } + return opts.MarshalNext(enc, m) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go index 1405bfd8e..775b3b0c3 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go @@ -21,6 +21,8 @@ import ( "path/filepath" "github.com/go-openapi/jsonreference" + + "k8s.io/kube-openapi/pkg/internal" ) // Refable is a struct for things that accept a $ref property @@ -149,19 +151,5 @@ func (r *Ref) UnmarshalJSON(d []byte) error { } func (r *Ref) fromMap(v map[string]interface{}) error { - if v == nil { - return nil - } - - if vv, ok := v["$ref"]; ok { - if str, ok := vv.(string); ok { - ref, err := jsonreference.New(str) - if err != nil { - return err - } - *r = Ref{Ref: ref} - } - } - - return nil + return internal.JSONRefFromMap(&r.Ref, v) } diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go index 9fd717ec3..3ff1fe132 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go @@ -18,6 +18,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) // ResponseProps properties specific to a 
response @@ -28,6 +30,15 @@ type ResponseProps struct { Examples map[string]interface{} `json:"examples,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type responsePropsOmitZero struct { + Description string `json:"description,omitempty"` + Schema *Schema `json:"schema,omitzero"` + Headers map[string]Header `json:"headers,omitempty"` + Examples map[string]interface{} `json:"examples,omitempty"` +} + // Response describes a single response from an API Operation. // // For more information: http://goo.gl/8us55a#responseObject @@ -39,17 +50,47 @@ type Response struct { // UnmarshalJSON hydrates this items instance with the data from JSON func (r *Response) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, r) + } + if err := json.Unmarshal(data, &r.ResponseProps); err != nil { return err } if err := json.Unmarshal(data, &r.Refable); err != nil { return err } - return json.Unmarshal(data, &r.VendorExtensible) + if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { + return err + } + + return nil +} + +func (r *Response) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + ResponseProps + Extensions + } + + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + + if err := r.Refable.Ref.fromMap(x.Extensions); err != nil { + return err + } + r.Extensions = internal.SanitizeExtensions(x.Extensions) + r.ResponseProps = x.ResponseProps + + return nil } // MarshalJSON converts this items object to JSON func (r Response) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.ResponseProps) if err != nil { return nil, err @@ -65,6 +106,18 @@ func (r Response) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (r Response) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + Extensions + ResponseProps responsePropsOmitZero `json:",inline"` + } + x.Ref = r.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(r.Extensions) + x.ResponseProps = responsePropsOmitZero(r.ResponseProps) + return opts.MarshalNext(enc, x) +} + // NewResponse creates a new response instance func NewResponse() *Response { return new(Response) diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go index b2c3883a9..d9ad760a4 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go @@ -16,10 +16,13 @@ package spec import ( "encoding/json" + "fmt" "reflect" "strconv" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) // Responses is a container for the expected responses of an operation. 
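Response.UnmarshalNextJSON above decodes every unknown key into the Extensions map, lifts "$ref" back out via Ref.fromMap, then keeps only "x-" keys via SanitizeExtensions. A minimal stdlib re-creation of that flow, assuming an invented toyResponse type in place of the real Response/Refable composition:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// toyResponse is a stand-in; the vendored type couples ResponseProps,
// Refable and VendorExtensible instead.
type toyResponse struct {
	Description string
	Ref         string
	Extensions  map[string]any
}

func (r *toyResponse) UnmarshalJSON(data []byte) error {
	all := map[string]any{}
	if err := json.Unmarshal(data, &all); err != nil {
		return err
	}
	if d, ok := all["description"].(string); ok {
		r.Description = d
	}
	// Lift $ref out of the untyped map, as Ref.fromMap does above.
	if s, ok := all["$ref"].(string); ok {
		r.Ref = s
	}
	// Keep only vendor-extension keys, as SanitizeExtensions does above.
	r.Extensions = map[string]any{}
	for k, v := range all {
		if strings.HasPrefix(k, "x-") {
			r.Extensions[k] = v
		}
	}
	return nil
}

func main() {
	var r toyResponse
	_ = json.Unmarshal([]byte(`{"description":"ok","$ref":"#/responses/err","x-id":7}`), &r)
	fmt.Println(r.Ref, r.Extensions["x-id"], r.Description) // #/responses/err 7 ok
}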
@@ -42,6 +45,10 @@ type Responses struct { // UnmarshalJSON hydrates this items instance with the data from JSON func (r *Responses) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, r) + } + if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { return err } @@ -56,6 +63,9 @@ func (r *Responses) UnmarshalJSON(data []byte) error { // MarshalJSON converts this items object to JSON func (r Responses) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.ResponsesProps) if err != nil { return nil, err @@ -68,6 +78,25 @@ func (r Responses) MarshalJSON() ([]byte, error) { return concated, nil } +func (r Responses) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + type ArbitraryKeys map[string]interface{} + var x struct { + ArbitraryKeys + Default *Response `json:"default,omitempty"` + } + x.ArbitraryKeys = make(map[string]any, len(r.Extensions)+len(r.StatusCodeResponses)) + for k, v := range r.Extensions { + if internal.IsExtensionKey(k) { + x.ArbitraryKeys[k] = v + } + } + for k, v := range r.StatusCodeResponses { + x.ArbitraryKeys[strconv.Itoa(k)] = v + } + x.Default = r.Default + return opts.MarshalNext(enc, x) +} + // ResponsesProps describes all responses for an operation. // It tells what is the default response and maps all responses with a // HTTP status code. @@ -90,21 +119,90 @@ func (r ResponsesProps) MarshalJSON() ([]byte, error) { // UnmarshalJSON unmarshals responses from JSON func (r *ResponsesProps) UnmarshalJSON(data []byte) error { - var res map[string]Response + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, r) + } + var res map[string]json.RawMessage if err := json.Unmarshal(data, &res); err != nil { - return nil + return err } if v, ok := res["default"]; ok { - r.Default = &v + value := Response{} + if err := json.Unmarshal(v, &value); err != nil { + return err + } + r.Default = &value delete(res, "default") } for k, v := range res { + // Take all integral keys if nk, err := strconv.Atoi(k); err == nil { if r.StatusCodeResponses == nil { r.StatusCodeResponses = map[int]Response{} } - r.StatusCodeResponses[nk] = v + value := Response{} + if err := json.Unmarshal(v, &value); err != nil { + return err + } + r.StatusCodeResponses[nk] = value } } return nil } + +func (r *Responses) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) (err error) { + tok, err := dec.ReadToken() + if err != nil { + return err + } + var ext any + var resp Response + switch k := tok.Kind(); k { + case 'n': + return nil // noop + case '{': + for { + tok, err := dec.ReadToken() + if err != nil { + return err + } + if tok.Kind() == '}' { + return nil + } + switch k := tok.String(); { + case internal.IsExtensionKey(k): + ext = nil + if err := opts.UnmarshalNext(dec, &ext); err != nil { + return err + } + + if r.Extensions == nil { + r.Extensions = make(map[string]any) + } + r.Extensions[k] = ext + case k == "default": + resp = Response{} + if err := opts.UnmarshalNext(dec, &resp); err != nil { + return err + } + + respCopy := resp + r.ResponsesProps.Default = &respCopy + default: + if nk, err := strconv.Atoi(k); err == nil { + resp = Response{} + if err := opts.UnmarshalNext(dec, &resp); err != nil { + return err + } + + if r.StatusCodeResponses == nil { + r.StatusCodeResponses = map[int]Response{} + } + r.StatusCodeResponses[nk] = resp + } + } + } + default: + return 
fmt.Errorf("unknown JSON kind: %v", k) + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go index b0aeeb0d0..dfbb2e05c 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go @@ -21,6 +21,8 @@ import ( "strings" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) // BooleanProperty creates a boolean property @@ -194,6 +196,46 @@ type SchemaProps struct { Definitions Definitions `json:"definitions,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type schemaPropsOmitZero struct { + ID string `json:"id,omitempty"` + Ref Ref `json:"-"` + Schema SchemaURL `json:"-"` + Description string `json:"description,omitempty"` + Type StringOrArray `json:"type,omitzero"` + Nullable bool `json:"nullable,omitzero"` + Format string `json:"format,omitempty"` + Title string `json:"title,omitempty"` + Default interface{} `json:"default,omitzero"` + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitzero"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitzero"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitzero"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` + Required []string `json:"required,omitempty"` + Items *SchemaOrArray `json:"items,omitzero"` + AllOf []Schema `json:"allOf,omitempty"` + OneOf []Schema `json:"oneOf,omitempty"` + AnyOf []Schema `json:"anyOf,omitempty"` + Not *Schema `json:"not,omitzero"` + Properties map[string]Schema `json:"properties,omitempty"` + AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitzero"` + PatternProperties map[string]Schema `json:"patternProperties,omitempty"` + Dependencies Dependencies `json:"dependencies,omitempty"` + AdditionalItems *SchemaOrBool `json:"additionalItems,omitzero"` + Definitions Definitions `json:"definitions,omitempty"` +} + // SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) type SwaggerSchemaProps struct { Discriminator string `json:"discriminator,omitempty"` @@ -202,6 +244,15 @@ type SwaggerSchemaProps struct { Example interface{} `json:"example,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type swaggerSchemaPropsOmitZero struct { + Discriminator string `json:"discriminator,omitempty"` + ReadOnly bool `json:"readOnly,omitzero"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"` + Example interface{} `json:"example,omitempty"` +} + // Schema the schema object allows the definition of input and output data types. // These types can be objects, but also primitives and arrays. 
// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) @@ -432,6 +483,9 @@ func (s *Schema) WithExternalDocs(description, url string) *Schema { // MarshalJSON marshal this to JSON func (s Schema) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.SchemaProps) if err != nil { return nil, fmt.Errorf("schema props %v", err) @@ -463,8 +517,37 @@ func (s Schema) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil } +func (s Schema) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + type ArbitraryKeys map[string]interface{} + var x struct { + ArbitraryKeys + SchemaProps schemaPropsOmitZero `json:",inline"` + SwaggerSchemaProps swaggerSchemaPropsOmitZero `json:",inline"` + Schema string `json:"$schema,omitempty"` + Ref string `json:"$ref,omitempty"` + } + x.ArbitraryKeys = make(map[string]any, len(s.Extensions)+len(s.ExtraProps)) + for k, v := range s.Extensions { + if internal.IsExtensionKey(k) { + x.ArbitraryKeys[k] = v + } + } + for k, v := range s.ExtraProps { + x.ArbitraryKeys[k] = v + } + x.SchemaProps = schemaPropsOmitZero(s.SchemaProps) + x.SwaggerSchemaProps = swaggerSchemaPropsOmitZero(s.SwaggerSchemaProps) + x.Ref = s.Ref.String() + x.Schema = string(s.Schema) + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON marshal this from JSON func (s *Schema) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, s) + } + props := struct { SchemaProps SwaggerSchemaProps @@ -511,3 +594,38 @@ func (s *Schema) UnmarshalJSON(data []byte) error { return nil } + +func (s *Schema) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + Extensions + SchemaProps + SwaggerSchemaProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + + if err := x.Ref.fromMap(x.Extensions); err != nil { + return err + } + + if err := x.Schema.fromMap(x.Extensions); err != nil { + return err + } + + delete(x.Extensions, "$ref") + delete(x.Extensions, "$schema") + + for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) { + delete(x.Extensions, pn) + } + if len(x.Extensions) == 0 { + x.Extensions = nil + } + + s.ExtraProps = x.Extensions.sanitizeWithExtra() + s.Extensions = internal.SanitizeExtensions(x.Extensions) + s.SchemaProps = x.SchemaProps + s.SwaggerSchemaProps = x.SwaggerSchemaProps + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go index 563b9b95e..e2b7da14c 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go @@ -18,6 +18,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) // SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section @@ -44,6 +46,9 @@ type SecurityScheme struct { // MarshalJSON marshal this to JSON func (s SecurityScheme) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.SecuritySchemeProps) if err != nil { return nil, err @@ -55,6 +60,16 @@ func (s SecurityScheme) MarshalJSON() ([]byte, error) { return 
swag.ConcatJSON(b1, b2), nil } +func (s SecurityScheme) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + SecuritySchemeProps + } + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.SecuritySchemeProps = s.SecuritySchemeProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON marshal this from JSON func (s *SecurityScheme) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { @@ -62,3 +77,16 @@ func (s *SecurityScheme) UnmarshalJSON(data []byte) error { } return json.Unmarshal(data, &s.VendorExtensible) } + +func (s *SecurityScheme) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + Extensions + SecuritySchemeProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + s.Extensions = internal.SanitizeExtensions(x.Extensions) + s.SecuritySchemeProps = x.SecuritySchemeProps + return nil +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go index be66d1ddd..c8f3beaa3 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go @@ -19,6 +19,8 @@ import ( "fmt" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) // Swagger this is the root document object for the API specification. @@ -33,6 +35,9 @@ type Swagger struct { // MarshalJSON marshals this swagger structure to json func (s Swagger) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.SwaggerProps) if err != nil { return nil, err @@ -44,8 +49,22 @@ func (s Swagger) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +// MarshalJSON marshals this swagger structure to json +func (s Swagger) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + SwaggerProps + } + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.SwaggerProps = s.SwaggerProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON unmarshals a swagger spec from json func (s *Swagger) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, s) + } var sw Swagger if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { return err @@ -57,6 +76,23 @@ func (s *Swagger) UnmarshalJSON(data []byte) error { return nil } +func (s *Swagger) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + // Note: If you're willing to make breaking changes, it is possible to + // optimize this and other usages of this pattern: + // https://github.com/kubernetes/kube-openapi/pull/319#discussion_r983165948 + var x struct { + Extensions + SwaggerProps + } + + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + s.Extensions = internal.SanitizeExtensions(x.Extensions) + s.SwaggerProps = x.SwaggerProps + return nil +} + // SwaggerProps captures the top-level properties of an Api specification // // NOTE: validation rules @@ -96,6 +132,9 @@ var jsFalse = []byte("false") // MarshalJSON convert this object to JSON func (s SchemaOrBool) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } if s.Schema != nil { return 
json.Marshal(s.Schema) } @@ -106,23 +145,59 @@ func (s SchemaOrBool) MarshalJSON() ([]byte, error) { return jsTrue, nil } +// MarshalJSON convert this object to JSON +func (s SchemaOrBool) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + if s.Schema != nil { + return opts.MarshalNext(enc, s.Schema) + } + + if s.Schema == nil && !s.Allows { + return enc.WriteToken(jsonv2.False) + } + return enc.WriteToken(jsonv2.True) +} + // UnmarshalJSON converts this bool or schema object from a JSON structure func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, s) + } + var nw SchemaOrBool - if len(data) >= 4 { - if data[0] == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch + if len(data) > 0 && data[0] == '{' { + var sch Schema + if err := json.Unmarshal(data, &sch); err != nil { + return err } - nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e') + nw.Schema = &sch + nw.Allows = true + } else { + json.Unmarshal(data, &nw.Allows) } *s = nw return nil } +func (s *SchemaOrBool) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + switch k := dec.PeekKind(); k { + case '{': + err := opts.UnmarshalNext(dec, &s.Schema) + if err != nil { + return err + } + s.Allows = true + return nil + case 't', 'f': + err := opts.UnmarshalNext(dec, &s.Allows) + if err != nil { + return err + } + return nil + default: + return fmt.Errorf("expected object or bool, not '%v'", k.String()) + } +} + // SchemaOrStringArray represents a schema or a string array type SchemaOrStringArray struct { Schema *Schema @@ -131,6 +206,9 @@ type SchemaOrStringArray struct { // MarshalJSON converts this schema object or array into JSON structure func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } if len(s.Property) > 0 { return json.Marshal(s.Property) } @@ -140,8 +218,23 @@ func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { return []byte("null"), nil } +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrStringArray) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + if len(s.Property) > 0 { + return opts.MarshalNext(enc, s.Property) + } + if s.Schema != nil { + return opts.MarshalNext(enc, s.Schema) + } + return enc.WriteToken(jsonv2.Null) +} + // UnmarshalJSON converts this schema object or array from a JSON structure func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, s) + } + var first byte if len(data) > 1 { first = data[0] @@ -163,6 +256,18 @@ func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { return nil } +func (s *SchemaOrStringArray) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + switch dec.PeekKind() { + case '{': + return opts.UnmarshalNext(dec, &s.Schema) + case '[': + return opts.UnmarshalNext(dec, &s.Property) + default: + _, err := dec.ReadValue() + return err + } +} + // Definitions contains the models explicitly defined in this spec // An object to hold data types that can be consumed and produced by operations. // These data types can be primitives, arrays or models. 
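SchemaOrBool above, and StringOrArray/SchemaOrArray in the hunks that follow, all decode a JSON union by peeking at the next token kind ('{', '[', '"', 't'/'f', 'n') before committing to a target type. A stdlib approximation peeks at the first non-space byte instead of jsonv2's Decoder.PeekKind; toySchemaOrBool is an invented stand-in:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

type toySchemaOrBool struct {
	Allows bool
	Schema map[string]any
}

func (s *toySchemaOrBool) UnmarshalJSON(data []byte) error {
	switch b := bytes.TrimLeft(data, " \t\r\n"); {
	case len(b) > 0 && b[0] == '{':
		s.Allows = true // an object schema always allows, as in the diff
		return json.Unmarshal(b, &s.Schema)
	default:
		return json.Unmarshal(b, &s.Allows) // bare true/false
	}
}

func main() {
	var a, b toySchemaOrBool
	_ = json.Unmarshal([]byte(`false`), &a)
	_ = json.Unmarshal([]byte(`{"type":"string"}`), &b)
	fmt.Println(a.Allows, b.Allows, b.Schema["type"]) // false true string
}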
@@ -193,6 +298,10 @@ func (s StringOrArray) Contains(value string) bool { // UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string func (s *StringOrArray) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, s) + } + var first byte if len(data) > 1 { first = data[0] @@ -223,6 +332,23 @@ func (s *StringOrArray) UnmarshalJSON(data []byte) error { } } +func (s *StringOrArray) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + switch k := dec.PeekKind(); k { + case '[': + *s = StringOrArray{} + return opts.UnmarshalNext(dec, (*[]string)(s)) + case '"': + *s = StringOrArray{""} + return opts.UnmarshalNext(dec, &(*s)[0]) + case 'n': + // Throw out null token + _, _ = dec.ReadToken() + return nil + default: + return fmt.Errorf("expected string or array, not '%v'", k.String()) + } +} + // MarshalJSON converts this string or array to a JSON array or JSON string func (s StringOrArray) MarshalJSON() ([]byte, error) { if len(s) == 1 { @@ -256,14 +382,29 @@ func (s *SchemaOrArray) ContainsType(name string) bool { // MarshalJSON converts this schema object or array into JSON structure func (s SchemaOrArray) MarshalJSON() ([]byte, error) { - if len(s.Schemas) > 0 { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } + if s.Schemas != nil { return json.Marshal(s.Schemas) } return json.Marshal(s.Schema) } +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrArray) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + if s.Schemas != nil { + return opts.MarshalNext(enc, s.Schemas) + } + return opts.MarshalNext(enc, s.Schema) +} + // UnmarshalJSON converts this schema object or array from a JSON structure func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, s) + } + var nw SchemaOrArray var first byte if len(data) > 1 { @@ -284,3 +425,15 @@ func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { *s = nw return nil } + +func (s *SchemaOrArray) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + switch dec.PeekKind() { + case '{': + return opts.UnmarshalNext(dec, &s.Schema) + case '[': + return opts.UnmarshalNext(dec, &s.Schemas) + default: + _, err := dec.ReadValue() + return err + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go index ddd1eac7e..d105d52ca 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go @@ -18,6 +18,8 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) // TagProps describe a tag entry in the top level tags section of a swagger spec @@ -39,6 +41,9 @@ type Tag struct { // MarshalJSON marshal this to JSON func (t Tag) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(t) + } b1, err := json.Marshal(t.TagProps) if err != nil { return nil, err @@ -50,10 +55,37 @@ func (t Tag) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (t Tag) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + TagProps + } + x.Extensions = internal.SanitizeExtensions(t.Extensions) + 
x.TagProps = t.TagProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON marshal this from JSON func (t *Tag) UnmarshalJSON(data []byte) error { + if internal.UseOptimizedJSONUnmarshaling { + return jsonv2.Unmarshal(data, t) + } + if err := json.Unmarshal(data, &t.TagProps); err != nil { return err } return json.Unmarshal(data, &t.VendorExtensible) } + +func (t *Tag) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decoder) error { + var x struct { + Extensions + TagProps + } + if err := opts.UnmarshalNext(dec, &x); err != nil { + return err + } + t.Extensions = internal.SanitizeExtensions(x.Extensions) + t.TagProps = x.TagProps + return nil +} diff --git a/vendor/k8s.io/utils/pointer/pointer.go b/vendor/k8s.io/utils/pointer/pointer.go index f5802d2e8..b8103223a 100644 --- a/vendor/k8s.io/utils/pointer/pointer.go +++ b/vendor/k8s.io/utils/pointer/pointer.go @@ -52,6 +52,9 @@ func Int(i int) *int { return &i } +// IntPtr is a function variable referring to Int. +// +// Deprecated: Use Int instead. var IntPtr = Int // for back-compat // IntDeref dereferences the int ptr and returns it if not nil, or else @@ -63,6 +66,9 @@ func IntDeref(ptr *int, def int) int { return def } +// IntPtrDerefOr is a function variable referring to IntDeref. +// +// Deprecated: Use IntDeref instead. var IntPtrDerefOr = IntDeref // for back-compat // Int32 returns a pointer to an int32. @@ -70,6 +76,9 @@ func Int32(i int32) *int32 { return &i } +// Int32Ptr is a function variable referring to Int32. +// +// Deprecated: Use Int32 instead. var Int32Ptr = Int32 // for back-compat // Int32Deref dereferences the int32 ptr and returns it if not nil, or else @@ -81,6 +90,9 @@ func Int32Deref(ptr *int32, def int32) int32 { return def } +// Int32PtrDerefOr is a function variable referring to Int32Deref. +// +// Deprecated: Use Int32Deref instead. var Int32PtrDerefOr = Int32Deref // for back-compat // Int32Equal returns true if both arguments are nil or both arguments @@ -95,11 +107,74 @@ func Int32Equal(a, b *int32) bool { return *a == *b } +// Uint returns a pointer to an uint +func Uint(i uint) *uint { + return &i +} + +// UintPtr is a function variable referring to Uint. +// +// Deprecated: Use Uint instead. +var UintPtr = Uint // for back-compat + +// UintDeref dereferences the uint ptr and returns it if not nil, or else +// returns def. +func UintDeref(ptr *uint, def uint) uint { + if ptr != nil { + return *ptr + } + return def +} + +// UintPtrDerefOr is a function variable referring to UintDeref. +// +// Deprecated: Use UintDeref instead. +var UintPtrDerefOr = UintDeref // for back-compat + +// Uint32 returns a pointer to an uint32. +func Uint32(i uint32) *uint32 { + return &i +} + +// Uint32Ptr is a function variable referring to Uint32. +// +// Deprecated: Use Uint32 instead. +var Uint32Ptr = Uint32 // for back-compat + +// Uint32Deref dereferences the uint32 ptr and returns it if not nil, or else +// returns def. +func Uint32Deref(ptr *uint32, def uint32) uint32 { + if ptr != nil { + return *ptr + } + return def +} + +// Uint32PtrDerefOr is a function variable referring to Uint32Deref. +// +// Deprecated: Use Uint32Deref instead. +var Uint32PtrDerefOr = Uint32Deref // for back-compat + +// Uint32Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +func Uint32Equal(a, b *uint32) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} + // Int64 returns a pointer to an int64. 
func Int64(i int64) *int64 { return &i } +// Int64Ptr is a function variable referring to Int64. +// +// Deprecated: Use Int64 instead. var Int64Ptr = Int64 // for back-compat // Int64Deref dereferences the int64 ptr and returns it if not nil, or else @@ -111,6 +186,9 @@ func Int64Deref(ptr *int64, def int64) int64 { return def } +// Int64PtrDerefOr is a function variable referring to Int64Deref. +// +// Deprecated: Use Int64Deref instead. var Int64PtrDerefOr = Int64Deref // for back-compat // Int64Equal returns true if both arguments are nil or both arguments @@ -125,11 +203,50 @@ func Int64Equal(a, b *int64) bool { return *a == *b } +// Uint64 returns a pointer to an uint64. +func Uint64(i uint64) *uint64 { + return &i +} + +// Uint64Ptr is a function variable referring to Uint64. +// +// Deprecated: Use Uint64 instead. +var Uint64Ptr = Uint64 // for back-compat + +// Uint64Deref dereferences the uint64 ptr and returns it if not nil, or else +// returns def. +func Uint64Deref(ptr *uint64, def uint64) uint64 { + if ptr != nil { + return *ptr + } + return def +} + +// Uint64PtrDerefOr is a function variable referring to Uint64Deref. +// +// Deprecated: Use Uint64Deref instead. +var Uint64PtrDerefOr = Uint64Deref // for back-compat + +// Uint64Equal returns true if both arguments are nil or both arguments +// dereference to the same value. +func Uint64Equal(a, b *uint64) bool { + if (a == nil) != (b == nil) { + return false + } + if a == nil { + return true + } + return *a == *b +} + // Bool returns a pointer to a bool. func Bool(b bool) *bool { return &b } +// BoolPtr is a function variable referring to Bool. +// +// Deprecated: Use Bool instead. var BoolPtr = Bool // for back-compat // BoolDeref dereferences the bool ptr and returns it if not nil, or else @@ -141,6 +258,9 @@ func BoolDeref(ptr *bool, def bool) bool { return def } +// BoolPtrDerefOr is a function variable referring to BoolDeref. +// +// Deprecated: Use BoolDeref instead. var BoolPtrDerefOr = BoolDeref // for back-compat // BoolEqual returns true if both arguments are nil or both arguments @@ -160,6 +280,9 @@ func String(s string) *string { return &s } +// StringPtr is a function variable referring to String. +// +// Deprecated: Use String instead. var StringPtr = String // for back-compat // StringDeref dereferences the string ptr and returns it if not nil, or else @@ -171,6 +294,9 @@ func StringDeref(ptr *string, def string) string { return def } +// StringPtrDerefOr is a function variable referring to StringDeref. +// +// Deprecated: Use StringDeref instead. var StringPtrDerefOr = StringDeref // for back-compat // StringEqual returns true if both arguments are nil or both arguments @@ -190,6 +316,9 @@ func Float32(i float32) *float32 { return &i } +// Float32Ptr is a function variable referring to Float32. +// +// Deprecated: Use Float32 instead. var Float32Ptr = Float32 // Float32Deref dereferences the float32 ptr and returns it if not nil, or else @@ -201,6 +330,9 @@ func Float32Deref(ptr *float32, def float32) float32 { return def } +// Float32PtrDerefOr is a function variable referring to Float32Deref. +// +// Deprecated: Use Float32Deref instead. var Float32PtrDerefOr = Float32Deref // for back-compat // Float32Equal returns true if both arguments are nil or both arguments @@ -220,6 +352,9 @@ func Float64(i float64) *float64 { return &i } +// Float64Ptr is a function variable referring to Float64. +// +// Deprecated: Use Float64 instead. 
var Float64Ptr = Float64 // Float64Deref dereferences the float64 ptr and returns it if not nil, or else @@ -231,6 +366,9 @@ func Float64Deref(ptr *float64, def float64) float64 { return def } +// Float64PtrDerefOr is a function variable referring to Float64Deref. +// +// Deprecated: Use Float64Deref instead. var Float64PtrDerefOr = Float64Deref // for back-compat // Float64Equal returns true if both arguments are nil or both arguments diff --git a/vendor/k8s.io/utils/trace/trace.go b/vendor/k8s.io/utils/trace/trace.go index 3023d1066..a0b07a6d7 100644 --- a/vendor/k8s.io/utils/trace/trace.go +++ b/vendor/k8s.io/utils/trace/trace.go @@ -21,6 +21,7 @@ import ( "context" "fmt" "math/rand" + "sync" "time" "k8s.io/klog/v2" @@ -93,13 +94,16 @@ func (s traceStep) writeItem(b *bytes.Buffer, formatter string, startTime time.T // Trace keeps track of a set of "steps" and allows us to log a specific // step if it took longer than its share of the total allowed time type Trace struct { + // constant fields name string fields []Field - threshold *time.Duration startTime time.Time - endTime *time.Time - traceItems []traceItem parentTrace *Trace + // fields guarded by a lock + lock sync.RWMutex + threshold *time.Duration + endTime *time.Time + traceItems []traceItem } func (t *Trace) time() time.Time { @@ -138,6 +142,8 @@ func New(name string, fields ...Field) *Trace { // how long it took. The Fields add key value pairs to provide additional details about the trace // step. func (t *Trace) Step(msg string, fields ...Field) { + t.lock.Lock() + defer t.lock.Unlock() if t.traceItems == nil { // traces almost always have less than 6 steps, do this to avoid more than a single allocation t.traceItems = make([]traceItem, 0, 6) @@ -153,7 +159,9 @@ func (t *Trace) Nest(msg string, fields ...Field) *Trace { newTrace := New(msg, fields...) if t != nil { newTrace.parentTrace = t + t.lock.Lock() t.traceItems = append(t.traceItems, newTrace) + t.lock.Unlock() } return newTrace } @@ -163,7 +171,9 @@ func (t *Trace) Nest(msg string, fields ...Field) *Trace { // is logged. func (t *Trace) Log() { endTime := time.Now() + t.lock.Lock() t.endTime = &endTime + t.lock.Unlock() // an explicit logging request should dump all the steps out at the higher level if t.parentTrace == nil { // We don't start logging until Log or LogIfLong is called on the root trace t.logTrace() @@ -178,13 +188,17 @@ func (t *Trace) Log() { // If the Trace is nested it is not immediately logged. Instead, it is logged when the trace it // is nested within is logged. func (t *Trace) LogIfLong(threshold time.Duration) { + t.lock.Lock() t.threshold = &threshold + t.lock.Unlock() t.Log() } // logTopLevelTraces finds all traces in a hierarchy of nested traces that should be logged but do not have any // parents that will be logged, due to threshold limits, and logs them as top level traces. 
func (t *Trace) logTrace() { + t.lock.RLock() + defer t.lock.RUnlock() if t.durationIsWithinThreshold() { var buffer bytes.Buffer traceNum := rand.Int31() @@ -244,9 +258,13 @@ func (t *Trace) calculateStepThreshold() *time.Duration { traceThreshold := *t.threshold for _, s := range t.traceItems { nestedTrace, ok := s.(*Trace) - if ok && nestedTrace.threshold != nil { - traceThreshold = traceThreshold - *nestedTrace.threshold - lenTrace-- + if ok { + nestedTrace.lock.RLock() + if nestedTrace.threshold != nil { + traceThreshold = traceThreshold - *nestedTrace.threshold + lenTrace-- + } + nestedTrace.lock.RUnlock() } } diff --git a/vendor/modules.txt b/vendor/modules.txt index ec060ab9c..e907cc0ab 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -20,12 +20,6 @@ github.com/Azure/go-autorest/logger # github.com/Azure/go-autorest/tracing v0.6.0 ## explicit; go 1.12 github.com/Azure/go-autorest/tracing -# github.com/PuerkitoBio/purell v1.1.1 -## explicit -github.com/PuerkitoBio/purell -# github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 -## explicit -github.com/PuerkitoBio/urlesc # github.com/beorn7/perks v1.0.1 ## explicit; go 1.11 github.com/beorn7/perks/quantile @@ -35,10 +29,10 @@ github.com/cespare/xxhash/v2 # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/emicklei/go-restful v2.15.0+incompatible -## explicit -github.com/emicklei/go-restful -github.com/emicklei/go-restful/log +# github.com/emicklei/go-restful/v3 v3.9.0 +## explicit; go 1.13 +github.com/emicklei/go-restful/v3 +github.com/emicklei/go-restful/v3/log # github.com/evanphx/json-patch v4.12.0+incompatible ## explicit github.com/evanphx/json-patch @@ -51,17 +45,19 @@ github.com/fsnotify/fsnotify # github.com/go-logr/logr v1.2.3 ## explicit; go 1.16 github.com/go-logr/logr +github.com/go-logr/logr/funcr # github.com/go-logr/zapr v1.2.0 ## explicit; go 1.16 github.com/go-logr/zapr -# github.com/go-openapi/jsonpointer v0.19.5 +# github.com/go-openapi/jsonpointer v0.19.6 ## explicit; go 1.13 github.com/go-openapi/jsonpointer -# github.com/go-openapi/jsonreference v0.19.6 +# github.com/go-openapi/jsonreference v0.20.1 ## explicit; go 1.13 github.com/go-openapi/jsonreference -# github.com/go-openapi/swag v0.21.1 -## explicit; go 1.11 +github.com/go-openapi/jsonreference/internal +# github.com/go-openapi/swag v0.22.3 +## explicit; go 1.18 github.com/go-openapi/swag # github.com/gogo/protobuf v1.3.2 ## explicit; go 1.15 @@ -84,16 +80,17 @@ github.com/google/gnostic/extensions github.com/google/gnostic/jsonschema github.com/google/gnostic/openapiv2 github.com/google/gnostic/openapiv3 -# github.com/google/go-cmp v0.5.6 -## explicit; go 1.8 +# github.com/google/go-cmp v0.5.9 +## explicit; go 1.13 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/gofuzz v1.1.0 +# github.com/google/gofuzz v1.2.0 ## explicit; go 1.12 github.com/google/gofuzz +github.com/google/gofuzz/bytesource # github.com/google/uuid v1.1.2 ## explicit github.com/google/uuid @@ -148,8 +145,8 @@ github.com/modern-go/reflect2 # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 ## explicit github.com/munnerz/goautoneg -# github.com/onsi/ginkgo/v2 v2.0.0 -## explicit; go 1.16 +# github.com/onsi/ginkgo/v2 v2.4.0 +## explicit; go 1.18 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config 
github.com/onsi/ginkgo/v2/formatter @@ -160,8 +157,8 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.18.1 -## explicit; go 1.16 +# github.com/onsi/gomega v1.23.0 +## explicit; go 1.18 github.com/onsi/gomega github.com/onsi/gomega/format github.com/onsi/gomega/internal @@ -184,6 +181,10 @@ github.com/pborman/uuid # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors +# github.com/projectcalico/api v0.0.0-20230602153125-fb7148692637 +## explicit; go 1.19 +github.com/projectcalico/api/pkg/apis/projectcalico/v3 +github.com/projectcalico/api/pkg/lib/numorstring # github.com/prometheus/client_golang v1.12.1 ## explicit; go 1.13 github.com/prometheus/client_golang/prometheus @@ -238,7 +239,7 @@ golang.org/x/net/http/httpguts golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna -# golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 +# golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b ## explicit; go 1.11 golang.org/x/oauth2 golang.org/x/oauth2/authhandler @@ -300,8 +301,8 @@ google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/protobuf v1.27.1 -## explicit; go 1.9 +# google.golang.org/protobuf v1.28.1 +## explicit; go 1.11 google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire google.golang.org/protobuf/internal/descfmt @@ -341,7 +342,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.24.2 +# k8s.io/api v0.26.3 => k8s.io/api v0.24.2 ## explicit; go 1.16 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -399,7 +400,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 -# k8s.io/apimachinery v0.24.2 +# k8s.io/apimachinery v0.26.3 => k8s.io/apimachinery v0.24.2 ## explicit; go 1.16 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -446,7 +447,7 @@ k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/client-go v0.24.2 +# k8s.io/client-go v0.26.3 => k8s.io/client-go v0.24.2 ## explicit; go 1.16 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1 @@ -583,27 +584,30 @@ k8s.io/client-go/util/workqueue ## explicit; go 1.16 k8s.io/component-base/config k8s.io/component-base/config/v1alpha1 -# k8s.io/klog/v2 v2.60.1 +# k8s.io/klog/v2 v2.80.1 ## explicit; go 1.13 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer k8s.io/klog/v2/internal/clock +k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity -# k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 -## explicit; go 1.16 +# k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d +## explicit; go 1.18 k8s.io/kube-openapi/pkg/builder3/util k8s.io/kube-openapi/pkg/common k8s.io/kube-openapi/pkg/handler3 +k8s.io/kube-openapi/pkg/internal k8s.io/kube-openapi/pkg/internal/handler 
+k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json k8s.io/kube-openapi/pkg/openapiconv k8s.io/kube-openapi/pkg/schemaconv k8s.io/kube-openapi/pkg/schemamutation k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/validation/spec -# k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 -## explicit; go 1.12 +# k8s.io/utils v0.0.0-20221107191617-1a15be271d1d +## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock k8s.io/utils/clock/testing @@ -674,11 +678,11 @@ sigs.k8s.io/controller-runtime/pkg/webhook sigs.k8s.io/controller-runtime/pkg/webhook/admission sigs.k8s.io/controller-runtime/pkg/webhook/conversion sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics -# sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 -## explicit; go 1.17 +# sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd +## explicit; go 1.18 sigs.k8s.io/json sigs.k8s.io/json/internal/golang/encoding/json -# sigs.k8s.io/structured-merge-diff/v4 v4.2.1 +# sigs.k8s.io/structured-merge-diff/v4 v4.2.3 ## explicit; go 1.13 sigs.k8s.io/structured-merge-diff/v4/fieldpath sigs.k8s.io/structured-merge-diff/v4/schema @@ -687,3 +691,6 @@ sigs.k8s.io/structured-merge-diff/v4/value # sigs.k8s.io/yaml v1.3.0 ## explicit; go 1.12 sigs.k8s.io/yaml +# k8s.io/api => k8s.io/api v0.24.2 +# k8s.io/apimachinery => k8s.io/apimachinery v0.24.2 +# k8s.io/client-go => k8s.io/client-go v0.24.2 diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go index 3a8b64547..6a13cf2df 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go @@ -75,6 +75,8 @@ import ( // either be any string type, an integer, implement json.Unmarshaler, or // implement encoding.TextUnmarshaler. // +// If the JSON-encoded data contain a syntax error, Unmarshal returns a SyntaxError. +// // If a JSON value is not appropriate for a given target type, // or if a JSON number overflows the target type, Unmarshal // skips that field and completes the unmarshaling as best it can. @@ -85,15 +87,14 @@ import ( // // The JSON null value unmarshals into an interface, map, pointer, or slice // by setting that Go value to nil. Because null is often used in JSON to mean -// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// “not present,” unmarshaling a JSON null into any other Go type has no effect // on the value and produces no error. // // When unmarshaling quoted strings, invalid UTF-8 or // invalid UTF-16 surrogate pairs are not treated as an error. // Instead, they are replaced by the Unicode replacement // character U+FFFD. -// -func Unmarshal(data []byte, v interface{}, opts ...UnmarshalOpt) error { +func Unmarshal(data []byte, v any, opts ...UnmarshalOpt) error { // Check for well-formedness. // Avoids filling out half a data structure // before discovering a JSON syntax error. 
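The doc line added to Unmarshal above records that malformed input surfaces as a SyntaxError. This forked decoder lives under internal/ and cannot be imported directly, so here is the same behavior sketched with the upstream encoding/json package it mirrors:

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

func main() {
	var v any
	err := json.Unmarshal([]byte(`{"a":`), &v) // truncated input
	var syn *json.SyntaxError
	if errors.As(err, &syn) {
		// Offset is the byte position after which the error was detected.
		fmt.Printf("syntax error at offset %d: %v\n", syn.Offset, syn)
	}
}
```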
@@ -167,16 +168,16 @@ func (e *InvalidUnmarshalError) Error() string { return "json: Unmarshal(nil)" } - if e.Type.Kind() != reflect.Ptr { + if e.Type.Kind() != reflect.Pointer { return "json: Unmarshal(non-pointer " + e.Type.String() + ")" } return "json: Unmarshal(nil " + e.Type.String() + ")" } */ -func (d *decodeState) unmarshal(v interface{}) error { +func (d *decodeState) unmarshal(v any) error { rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { + if rv.Kind() != reflect.Pointer || rv.IsNil() { return &InvalidUnmarshalError{reflect.TypeOf(v)} } @@ -233,7 +234,7 @@ type decodeState struct { disallowUnknownFields bool savedStrictErrors []error - seenStrictErrors map[string]struct{} + seenStrictErrors map[strictError]struct{} strictFieldStack []string caseSensitive bool @@ -425,7 +426,7 @@ type unquotedValue struct{} // quoted string literal or literal null into an interface value. // If it finds anything other than a quoted string literal or null, // valueQuoted returns unquotedValue{}. -func (d *decodeState) valueQuoted() interface{} { +func (d *decodeState) valueQuoted() any { switch d.opcode { default: panic(phasePanicMsg) @@ -467,7 +468,7 @@ func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnm // If v is a named type and is addressable, // start with its address, so that if the type has pointer methods, // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + if v.Kind() != reflect.Pointer && v.Type().Name() != "" && v.CanAddr() { haveAddr = true v = v.Addr() } @@ -476,14 +477,14 @@ func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnm // usefully addressable. if v.Kind() == reflect.Interface && !v.IsNil() { e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + if e.Kind() == reflect.Pointer && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Pointer) { haveAddr = false v = e continue } } - if v.Kind() != reflect.Ptr { + if v.Kind() != reflect.Pointer { break } @@ -678,7 +679,7 @@ func (d *decodeState) object(v reflect.Value) error { reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: default: - if !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) { + if !reflect.PointerTo(t.Key()).Implements(textUnmarshalerType) { d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) d.skip() return nil @@ -695,7 +696,7 @@ func (d *decodeState) object(v reflect.Value) error { seenKeys = map[string]struct{}{} } if _, seen := seenKeys[fieldName]; seen { - d.saveStrictError(d.newFieldError("duplicate field", fieldName)) + d.saveStrictError(d.newFieldError(duplicateStrictErrType, fieldName)) } else { seenKeys[fieldName] = struct{}{} } @@ -711,7 +712,7 @@ func (d *decodeState) object(v reflect.Value) error { var seenKeys uint64 checkDuplicateField = func(fieldNameIndex int, fieldName string) { if seenKeys&(1< startDetectingCyclesAfter { // We're a large number of nested ptrEncoder.encode calls deep; // start checking if we've run into a pointer cycle. 
- ptr := v.Pointer() + ptr := v.UnsafePointer() if _, ok := e.ptrSeen[ptr]; ok { e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) } @@ -877,9 +876,9 @@ func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { // Here we use a struct to memorize the pointer to the first element of the slice // and its length. ptr := struct { - ptr uintptr + ptr interface{} // always an unsafe.Pointer, but avoids a dependency on package unsafe len int - }{v.Pointer(), v.Len()} + }{v.UnsafePointer(), v.Len()} if _, ok := e.ptrSeen[ptr]; ok { e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) } @@ -893,7 +892,7 @@ func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { func newSliceEncoder(t reflect.Type) encoderFunc { // Byte slices get special treatment; arrays don't. if t.Elem().Kind() == reflect.Uint8 { - p := reflect.PtrTo(t.Elem()) + p := reflect.PointerTo(t.Elem()) if !p.Implements(marshalerType) && !p.Implements(textMarshalerType) { return encodeByteSlice } @@ -989,7 +988,7 @@ func isValidTag(s string) bool { func typeByIndex(t reflect.Type, index []int) reflect.Type { for _, i := range index { - if t.Kind() == reflect.Ptr { + if t.Kind() == reflect.Pointer { t = t.Elem() } t = t.Field(i).Type @@ -1009,7 +1008,7 @@ func (w *reflectWithString) resolve() error { return nil } if tm, ok := w.k.Interface().(encoding.TextMarshaler); ok { - if w.k.Kind() == reflect.Ptr && w.k.IsNil() { + if w.k.Kind() == reflect.Pointer && w.k.IsNil() { return nil } buf, err := tm.MarshalText() @@ -1243,7 +1242,7 @@ func typeFields(t reflect.Type) structFields { sf := f.typ.Field(i) if sf.Anonymous { t := sf.Type - if t.Kind() == reflect.Ptr { + if t.Kind() == reflect.Pointer { t = t.Elem() } if !sf.IsExported() && t.Kind() != reflect.Struct { @@ -1269,7 +1268,7 @@ func typeFields(t reflect.Type) structFields { index[len(f.index)] = i ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { + if ft.Name() == "" && ft.Kind() == reflect.Pointer { // Follow pointer. ft = ft.Elem() } diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go index 9e170127d..ab249b2bb 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go @@ -24,8 +24,9 @@ const ( // 4) simpleLetterEqualFold, no specials, no non-letters. // // The letters S and K are special because they map to 3 runes, not just 2: -// * S maps to s and to U+017F 'ſ' Latin small letter long s -// * k maps to K and to U+212A 'K' Kelvin sign +// - S maps to s and to U+017F 'ſ' Latin small letter long s +// - k maps to K and to U+212A 'K' Kelvin sign +// // See https://play.golang.org/p/tTxjOc0OGo // // The returned function is specialized for matching against s and diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fuzz.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fuzz.go index d3fa2d111..b8f4ff2c1 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fuzz.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fuzz.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build gofuzz -// +build gofuzz package json @@ -12,10 +11,10 @@ import ( ) func Fuzz(data []byte) (score int) { - for _, ctor := range []func() interface{}{ - func() interface{} { return new(interface{}) }, - func() interface{} { return new(map[string]interface{}) }, - func() interface{} { return new([]interface{}) }, + for _, ctor := range []func() any{ + func() any { return new(any) }, + func() any { return new(map[string]any) }, + func() any { return new([]any) }, } { v := ctor() err := Unmarshal(data, v) diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/kubernetes_patch.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/kubernetes_patch.go index cb9ab0627..e1c0a74d9 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/kubernetes_patch.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/kubernetes_patch.go @@ -18,7 +18,6 @@ package json import ( gojson "encoding/json" - "fmt" "strconv" "strings" ) @@ -71,32 +70,37 @@ func (d *Decoder) DisallowDuplicateFields() { d.d.disallowDuplicateFields = true } -func (d *decodeState) newFieldError(msg, field string) error { +func (d *decodeState) newFieldError(errType strictErrType, field string) *strictError { if len(d.strictFieldStack) > 0 { - return fmt.Errorf("%s %q", msg, strings.Join(d.strictFieldStack, "")+"."+field) + return &strictError{ + ErrType: errType, + Path: strings.Join(d.strictFieldStack, "") + "." + field, + } } else { - return fmt.Errorf("%s %q", msg, field) + return &strictError{ + ErrType: errType, + Path: field, + } } } // saveStrictError saves a strict decoding error, // for reporting at the end of the unmarshal if no other errors occurred. -func (d *decodeState) saveStrictError(err error) { +func (d *decodeState) saveStrictError(err *strictError) { // prevent excessive numbers of accumulated errors if len(d.savedStrictErrors) >= 100 { return } // dedupe accumulated strict errors if d.seenStrictErrors == nil { - d.seenStrictErrors = map[string]struct{}{} + d.seenStrictErrors = map[strictError]struct{}{} } - msg := err.Error() - if _, seen := d.seenStrictErrors[msg]; seen { + if _, seen := d.seenStrictErrors[*err]; seen { return } // accumulate the error - d.seenStrictErrors[msg] = struct{}{} + d.seenStrictErrors[*err] = struct{}{} d.savedStrictErrors = append(d.savedStrictErrors, err) } @@ -118,6 +122,33 @@ func (d *decodeState) appendStrictFieldStackIndex(i int) { d.strictFieldStack = append(d.strictFieldStack, "[", strconv.Itoa(i), "]") } +type strictErrType string + +const ( + unknownStrictErrType strictErrType = "unknown field" + duplicateStrictErrType strictErrType = "duplicate field" +) + +// strictError is a strict decoding error +// It has an ErrType (either unknown or duplicate) +// and a path to the erroneous field +type strictError struct { + ErrType strictErrType + Path string +} + +func (e *strictError) Error() string { + return string(e.ErrType) + " " + strconv.Quote(e.Path) +} + +func (e *strictError) FieldPath() string { + return e.Path +} + +func (e *strictError) SetFieldPath(path string) { + e.Path = path +} + // UnmarshalStrictError holds errors resulting from use of strict disallow___ decoder directives. // If this is returned from Unmarshal(), it means the decoding was successful in all other respects. 
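The kubernetes_patch.go hunk above dedupes strict errors by the comparable strictError value itself rather than by its formatted message, so duplicates are dropped without building an error string per occurrence. A standalone analogue of that pattern (fieldError is a stand-in for the internal strictError type):

```go
package main

import "fmt"

// fieldError mirrors the shape of strictError: two comparable string
// fields, so the struct value itself can serve as a map key.
type fieldError struct {
	ErrType string
	Path    string
}

func main() {
	seen := map[fieldError]struct{}{}
	var saved []fieldError

	save := func(e fieldError) {
		if _, ok := seen[e]; ok {
			return // duplicate dropped without formatting a message
		}
		seen[e] = struct{}{}
		saved = append(saved, e)
	}

	save(fieldError{"duplicate field", "spec.name"})
	save(fieldError{"duplicate field", "spec.name"}) // deduped
	fmt.Println(len(saved))                          // 1
}
```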
type UnmarshalStrictError struct { diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go index 9dc1903e2..22fc6922d 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go @@ -27,6 +27,7 @@ func Valid(data []byte) bool { // checkValid verifies that data is valid JSON-encoded data. // scan is passed in for use by checkValid to avoid an allocation. +// checkValid returns nil or a SyntaxError. func checkValid(data []byte, scan *scanner) error { scan.reset() for _, c := range data { @@ -42,6 +43,7 @@ func checkValid(data []byte, scan *scanner) error { } // A SyntaxError is a description of a JSON syntax error. +// Unmarshal will return a SyntaxError if the JSON can't be parsed. type SyntaxError struct { msg string // description of error Offset int64 // error occurred after reading Offset bytes @@ -83,7 +85,7 @@ type scanner struct { } var scannerPool = sync.Pool{ - New: func() interface{} { + New: func() any { return &scanner{} }, } diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go index 5f87df1c6..1967755ac 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go @@ -45,7 +45,7 @@ func (dec *Decoder) DisallowUnknownFields() { dec.d.disallowUnknownFields = true // // See the documentation for Unmarshal for details about // the conversion of JSON into a Go value. -func (dec *Decoder) Decode(v interface{}) error { +func (dec *Decoder) Decode(v any) error { if dec.err != nil { return dec.err } @@ -197,7 +197,7 @@ func NewEncoder(w io.Writer) *Encoder { // // See the documentation for Marshal for details about the // conversion of Go values to JSON. -func (enc *Encoder) Encode(v interface{}) error { +func (enc *Encoder) Encode(v any) error { if enc.err != nil { return enc.err } @@ -289,8 +289,7 @@ var _ Unmarshaler = (*RawMessage)(nil) // Number, for JSON numbers // string, for JSON string literals // nil, for JSON null -// -type Token interface{} +type Token any */ const ( @@ -457,7 +456,7 @@ func (dec *Decoder) Token() (Token, error) { if !dec.tokenValueAllowed() { return dec.tokenError(c) } - var x interface{} + var x any if err := dec.Decode(&x); err != nil { return nil, err } diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/tags.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/tags.go index c38fd5102..b490328f4 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/tags.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/tags.go @@ -15,10 +15,8 @@ type tagOptions string // parseTag splits a struct field's json tag into its name and // comma-separated options. 
func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") + tag, opt, _ := strings.Cut(tag, ",") + return tag, tagOptions(opt) } // Contains reports whether a comma-separated list of options @@ -30,15 +28,11 @@ func (o tagOptions) Contains(optionName string) bool { } s := string(o) for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { + var name string + name, s, _ = strings.Cut(s, ",") + if name == optionName { return true } - s = next } return false } diff --git a/vendor/sigs.k8s.io/json/json.go b/vendor/sigs.k8s.io/json/json.go index 764e2a84c..e8f31b16c 100644 --- a/vendor/sigs.k8s.io/json/json.go +++ b/vendor/sigs.k8s.io/json/json.go @@ -34,13 +34,13 @@ type Decoder interface { } // NewDecoderCaseSensitivePreserveInts returns a decoder that matches the behavior of encoding/json#NewDecoder, with the following changes: -// - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields) -// or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded. -// - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if -// the JSON data does not contain a "." character and parses as an integer successfully and -// does not overflow int64. Otherwise, the number is unmarshaled as a float64. -// - If a syntax error is returned, it will not be of type encoding/json#SyntaxError, -// but will be recognizeable by this package's IsSyntaxError() function. +// - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields) +// or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded. +// - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if +// the JSON data does not contain a "." character and parses as an integer successfully and +// does not overflow int64. Otherwise, the number is unmarshaled as a float64. +// - If a syntax error is returned, it will not be of type encoding/json#SyntaxError, +// but will be recognizeable by this package's IsSyntaxError() function. func NewDecoderCaseSensitivePreserveInts(r io.Reader) Decoder { d := internaljson.NewDecoder(r) d.CaseSensitive() @@ -51,13 +51,13 @@ func NewDecoderCaseSensitivePreserveInts(r io.Reader) Decoder { // UnmarshalCaseSensitivePreserveInts parses the JSON-encoded data and stores the result in the value pointed to by v. // // UnmarshalCaseSensitivePreserveInts matches the behavior of encoding/json#Unmarshal, with the following changes: -// - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields) -// or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded. -// - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if -// the JSON data does not contain a "." character and parses as an integer successfully and -// does not overflow int64. Otherwise, the number is unmarshaled as a float64. -// - If a syntax error is returned, it will not be of type encoding/json#SyntaxError, -// but will be recognizeable by this package's IsSyntaxError() function. 
+// - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields) +// or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded. +// - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if +// the JSON data does not contain a "." character and parses as an integer successfully and +// does not overflow int64. Otherwise, the number is unmarshaled as a float64. +// - If a syntax error is returned, it will not be of type encoding/json#SyntaxError, +// but will be recognizeable by this package's IsSyntaxError() function. func UnmarshalCaseSensitivePreserveInts(data []byte, v interface{}) error { return internaljson.Unmarshal( data, @@ -84,6 +84,8 @@ const ( // and a list of the strict failures (if any) are returned. If no `strictOptions` are selected, // all supported strict checks are performed. // +// Strict errors returned will implement the FieldError interface for the specific erroneous fields. +// // Currently supported strict checks are: // - DisallowDuplicateFields: ensure the data contains no duplicate fields // - DisallowUnknownFields: ensure the data contains no unknown fields (when decoding into typed structs) @@ -137,3 +139,12 @@ func SyntaxErrorOffset(err error) (isSyntaxError bool, offset int64) { return false, 0 } } + +// FieldError is an error that provides access to the path of the erroneous field +type FieldError interface { + error + // FieldPath provides the full path of the erroneous field within the json object. + FieldPath() string + // SetFieldPath updates the path of the erroneous field output in the error message. + SetFieldPath(path string) +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go index 01103b38a..7e5dc7582 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/elements.go @@ -16,7 +16,9 @@ limitations under the License. package schema -import "sync" +import ( + "sync" +) // Schema is a list of named types. // @@ -27,6 +29,11 @@ type Schema struct { once sync.Once m map[string]TypeDef + + lock sync.Mutex + // Cached results of resolving type references to atoms. Only stores + // type references which require fields of Atom to be overriden. + resolvedTypes map[TypeRef]Atom } // A TypeSpecifier references a particular type in a schema. @@ -48,6 +55,12 @@ type TypeRef struct { // Either the name or one member of Atom should be set. NamedType *string `yaml:"namedType,omitempty"` Inlined Atom `yaml:",inline,omitempty"` + + // If this reference refers to a map-type or list-type, this field overrides + // the `ElementRelationship` of the referred type when resolved. + // If this field is nil, then it has no effect. + // See `Map` and `List` for more information about `ElementRelationship` + ElementRelationship *ElementRelationship `yaml:"elementRelationship,omitempty"` } // Atom represents the smallest possible pieces of the type system. @@ -88,11 +101,11 @@ const ( // Map is a key-value pair. Its default semantics are the same as an // associative list, but: -// * It is serialized differently: +// - It is serialized differently: // map: {"k": {"value": "v"}} // list: [{"key": "k", "value": "v"}] -// * Keys must be string typed. -// * Keys can't have multiple components. +// - Keys must be string typed. +// - Keys can't have multiple components. 
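The json.go hunk above surfaces strict failures through the new FieldError interface. A sketch of how a caller of sigs.k8s.io/json might read the field path of a strict failure (the struct, input, and printed path are illustrative):

```go
package main

import (
	"errors"
	"fmt"

	kjson "sigs.k8s.io/json"
)

type Spec struct {
	Name string `json:"name"`
}

func main() {
	data := []byte(`{"name":"a","name":"b"}`) // duplicate key
	var s Spec

	// With no strictOptions, all supported strict checks are performed.
	strictErrs, err := kjson.UnmarshalStrict(data, &s)
	if err != nil {
		panic(err) // decoding itself failed
	}
	for _, se := range strictErrs {
		var fe kjson.FieldError
		if errors.As(se, &fe) {
			fmt.Println("strict failure at:", fe.FieldPath()) // e.g. "name"
		}
	}
}
```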
// // Optionally, maps may be atomic (for example, imagine representing an RGB // color value--it doesn't make sense to have different actors own the R and G @@ -146,6 +159,31 @@ func (m *Map) FindField(name string) (StructField, bool) { return sf, ok } +// CopyInto this instance of Map into the other +// If other is nil this method does nothing. +// If other is already initialized, overwrites it with this instance +// Warning: Not thread safe +func (m *Map) CopyInto(dst *Map) { + if dst == nil { + return + } + + // Map type is considered immutable so sharing references + dst.Fields = m.Fields + dst.ElementType = m.ElementType + dst.Unions = m.Unions + dst.ElementRelationship = m.ElementRelationship + + if m.m != nil { + // If cache is non-nil then the once token had been consumed. + // Must reset token and use it again to ensure same semantics. + dst.once = sync.Once{} + dst.once.Do(func() { + dst.m = m.m + }) + } +} + // UnionFields are mapping between the fields that are part of the union and // their discriminated value. The discriminated value has to be set, and // should not conflict with other discriminated value in the list. @@ -244,18 +282,93 @@ func (s *Schema) FindNamedType(name string) (TypeDef, bool) { return t, ok } +func (s *Schema) resolveNoOverrides(tr TypeRef) (Atom, bool) { + result := Atom{} + + if tr.NamedType != nil { + t, ok := s.FindNamedType(*tr.NamedType) + if !ok { + return Atom{}, false + } + + result = t.Atom + } else { + result = tr.Inlined + } + + return result, true +} + // Resolve is a convenience function which returns the atom referenced, whether // it is inline or named. Returns (Atom{}, false) if the type can't be resolved. // // This allows callers to not care about the difference between a (possibly // inlined) reference and a definition. func (s *Schema) Resolve(tr TypeRef) (Atom, bool) { - if tr.NamedType != nil { - t, ok := s.FindNamedType(*tr.NamedType) - if !ok { + // If this is a plain reference with no overrides, just return the type + if tr.ElementRelationship == nil { + return s.resolveNoOverrides(tr) + } + + s.lock.Lock() + defer s.lock.Unlock() + + if s.resolvedTypes == nil { + s.resolvedTypes = make(map[TypeRef]Atom) + } + + var result Atom + var exists bool + + // Return cached result if available + // If not, calculate result and cache it + if result, exists = s.resolvedTypes[tr]; !exists { + if result, exists = s.resolveNoOverrides(tr); exists { + // Allow field-level electives to override the referred type's modifiers + switch { + case result.Map != nil: + mapCopy := Map{} + result.Map.CopyInto(&mapCopy) + mapCopy.ElementRelationship = *tr.ElementRelationship + result.Map = &mapCopy + case result.List != nil: + listCopy := *result.List + listCopy.ElementRelationship = *tr.ElementRelationship + result.List = &listCopy + case result.Scalar != nil: + return Atom{}, false + default: + return Atom{}, false + } + } else { return Atom{}, false } - return t.Atom, true + + // Save result. If it is nil, that is also recorded as not existing. + s.resolvedTypes[tr] = result + } + + return result, true +} + +// Clones this instance of Schema into the other +// If other is nil this method does nothing. +// If other is already initialized, overwrites it with this instance +// Warning: Not thread safe +func (s *Schema) CopyInto(dst *Schema) { + if dst == nil { + return + } + + // Schema type is considered immutable so sharing references + dst.Types = s.Types + + if s.m != nil { + // If cache is non-nil then the once token had been consumed. 
+ // Must reset token and use it again to ensure same semantics. + dst.once = sync.Once{} + dst.once.Do(func() { + dst.m = s.m + }) } - return tr.Inlined, true } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/equals.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/equals.go index 4c303eecc..b668eff83 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/equals.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/equals.go @@ -52,6 +52,9 @@ func (a *TypeRef) Equals(b *TypeRef) bool { } //return true } + if a.ElementRelationship != b.ElementRelationship { + return false + } return a.Inlined.Equals(&b.Inlined) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go index bb60e2a5f..7d64d1308 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go @@ -66,6 +66,9 @@ var SchemaSchemaYAML = `types: - name: untyped type: namedType: untyped + - name: elementRelationship + type: + scalar: string - name: scalar scalar: string - name: map diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go index 6b2b2cb4a..19c77334f 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go @@ -105,7 +105,11 @@ type atomHandler interface { func resolveSchema(s *schema.Schema, tr schema.TypeRef, v value.Value, ah atomHandler) ValidationErrors { a, ok := s.Resolve(tr) if !ok { - return errorf("schema error: no type found matching: %v", *tr.NamedType) + typeName := "inlined type" + if tr.NamedType != nil { + typeName = *tr.NamedType + } + return errorf("schema error: no type found matching: %v", typeName) } a = deduceAtom(a, v) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go index 75244ef64..913644083 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go @@ -80,7 +80,12 @@ func (w *mergingWalker) merge(prefixFn func() string) (errs ValidationErrors) { alhs := deduceAtom(a, w.lhs) arhs := deduceAtom(a, w.rhs) - if alhs.Equals(&arhs) { + + // deduceAtom does not fix the type for nil values + // nil is a wildcard and will accept whatever form the other operand takes + if w.rhs == nil { + errs = append(errs, handleAtom(alhs, w.typeRef, w)...) + } else if w.lhs == nil || alhs.Equals(&arhs) { errs = append(errs, handleAtom(arhs, w.typeRef, w)...) } else { w2 := *w diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go index 2b98b729c..6a7697e3b 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/reconcile_schema.go @@ -110,7 +110,7 @@ func (v *reconcileWithSchemaWalker) finishDescent(v2 *reconcileWithSchemaWalker) } // ReconcileFieldSetWithSchema reconciles the a field set with any changes to the -//// object's schema since the field set was written. Returns the reconciled field set, or nil of +// object's schema since the field set was written. Returns the reconciled field set, or nil of // no changes were made to the field set. 
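The schema hunks above let a TypeRef override the ElementRelationship of the map or list type it names, with Resolve caching each overridden resolution in resolvedTypes. A minimal sketch of that behavior (the type name and relationships are illustrative):

```go
package main

import (
	"fmt"

	"sigs.k8s.io/structured-merge-diff/v4/schema"
)

func main() {
	name := "labels"
	s := &schema.Schema{
		Types: []schema.TypeDef{{
			Name: name,
			Atom: schema.Atom{Map: &schema.Map{
				ElementRelationship: schema.Separable,
			}},
		}},
	}

	// A plain reference resolves to the named type unchanged.
	a, _ := s.Resolve(schema.TypeRef{NamedType: &name})
	fmt.Println(a.Map.ElementRelationship) // separable

	// A reference carrying an override resolves to a copy of the Map with
	// the relationship replaced; the result is cached per TypeRef.
	atomic := schema.Atomic
	a, _ = s.Resolve(schema.TypeRef{NamedType: &name, ElementRelationship: &atomic})
	fmt.Println(a.Map.ElementRelationship) // atomic
}
```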
// // Supports: diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go index e9e6be8be..d63a97fe2 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go @@ -99,12 +99,13 @@ func (tv TypedValue) ToFieldSet() (*fieldpath.Set, error) { // Merge returns the result of merging tv and pso ("partially specified // object") together. Of note: -// * No fields can be removed by this operation. -// * If both tv and pso specify a given leaf field, the result will keep pso's -// value. -// * Container typed elements will have their items ordered: -// * like tv, if pso doesn't change anything in the container -// * like pso, if pso does change something in the container. +// - No fields can be removed by this operation. +// - If both tv and pso specify a given leaf field, the result will keep pso's +// value. +// - Container typed elements will have their items ordered: +// 1. like tv, if pso doesn't change anything in the container +// 2. like pso, if pso does change something in the container. + // tv and pso must both be of the same type (their Schema and TypeRef must // match), or an error will be returned. Validation errors will be returned if // the objects don't conform to the schema. From f31809a93261db6cf37304198074ff0c1a02a31f Mon Sep 17 00:00:00 2001 From: Rostislav Porohnya Date: Wed, 22 Nov 2023 19:15:47 +0200 Subject: [PATCH 4/4] Implement IPPool and IPReservation creation flow and static IP assignment --- config/rbac/role.yaml | 26 +++ .../clusters_v1beta1_cassandra.yaml | 2 +- controllers/clusters/cassandra_controller.go | 2 + controllers/clusters/on_premises.go | 211 +++++++++++++++++- main.go | 2 + pkg/models/errors.go | 2 + pkg/models/on_premises.go | 3 + pkg/models/operator.go | 1 + 8 files changed, 238 insertions(+), 11 deletions(-) diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 167097051..223286d3c 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -862,3 +862,29 @@ rules: - patch - update - watch +- apiGroups: + - projectcalico.org + resources: + - ippools + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +- apiGroups: + - projectcalico.org + resources: + - ipreservations + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch diff --git a/config/samples/onpremises/clusters_v1beta1_cassandra.yaml b/config/samples/onpremises/clusters_v1beta1_cassandra.yaml index 8b7968872..7f7cf180c 100644 --- a/config/samples/onpremises/clusters_v1beta1_cassandra.yaml +++ b/config/samples/onpremises/clusters_v1beta1_cassandra.yaml @@ -3,7 +3,7 @@ kind: Cassandra metadata: name: cassandra-on-prem-cluster spec: - name: "danylo-on-prem-cassandra" + name: "rostyslp-on-prem-cassandra" version: "4.0.10" privateNetworkCluster: false onPremisesSpec: diff --git a/controllers/clusters/cassandra_controller.go b/controllers/clusters/cassandra_controller.go index 610cf7ea4..36ff16080 100644 --- a/controllers/clusters/cassandra_controller.go +++ b/controllers/clusters/cassandra_controller.go @@ -67,6 +67,8 @@ type CassandraReconciler struct { //+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=projectcalico.org,resources=ipreservations,verbs=get;list;watch;create;update;patch;delete;deletecollection +//+kubebuilder:rbac:groups=projectcalico.org,resources=ippools,verbs=get;list;watch;create;update;patch;delete;deletecollection // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. diff --git a/controllers/clusters/on_premises.go b/controllers/clusters/on_premises.go index 78ca937cc..0782f6823 100644 --- a/controllers/clusters/on_premises.go +++ b/controllers/clusters/on_premises.go @@ -19,8 +19,10 @@ package clusters import ( "context" "fmt" + "net" "strings" + apicalico "github.com/projectcalico/api/pkg/apis/projectcalico/v3" k8scorev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" @@ -74,18 +76,23 @@ func newOnPremisesBootstrap( } func handleCreateOnPremisesClusterResources(ctx context.Context, b *onPremisesBootstrap) error { - if len(b.ClusterStatus.DataCentres) < 1 { - return fmt.Errorf("datacenter ID is empty") + if len(b.ClusterStatus.DataCentres) == 0 { + return fmt.Errorf("datacenter ID in status is empty") + } + + reservedCIDR, err := reconcileIPReservations(ctx, b.K8sClient) + if err != nil { + return err } if b.PrivateNetworkCluster { - err := reconcileSSHGatewayResources(ctx, b) + err := reconcileSSHGatewayResources(ctx, b, reservedCIDR) if err != nil { return err } } - err := reconcileNodesResources(ctx, b) + err = reconcileNodesResources(ctx, b, reservedCIDR) if err != nil { return err } @@ -93,7 +100,7 @@ func handleCreateOnPremisesClusterResources(ctx context.Context, b *onPremisesBo return nil } -func reconcileSSHGatewayResources(ctx context.Context, b *onPremisesBootstrap) error { +func reconcileSSHGatewayResources(ctx context.Context, b *onPremisesBootstrap, reservedCIDR string) error { gatewayDVSize, err := resource.ParseQuantity(b.OnPremisesSpec.OSDiskSize) if err != nil { return err @@ -122,6 +129,11 @@ func reconcileSSHGatewayResources(ctx context.Context, b *onPremisesBootstrap) e gatewayName := fmt.Sprintf("%s-%s", models.GatewayVMPrefix, strings.ToLower(b.K8sObject.GetName())) + ip, err := getAvailableIP(ctx, reservedCIDR, b.K8sClient) + if err != nil { + return err + } + gatewayVM := &virtcorev1.VirtualMachine{} err = b.K8sClient.Get(ctx, types.NamespacedName{ Namespace: b.K8sObject.GetNamespace(), @@ -138,6 +150,7 @@ func reconcileSSHGatewayResources(ctx context.Context, b *onPremisesBootstrap) e b.ClusterStatus.DataCentres[0].ID, models.GatewayRack, gatewayDV.Name, + ip, gatewayCPU, gatewayMemory) if err != nil { @@ -175,7 +188,7 @@ func reconcileSSHGatewayResources(ctx context.Context, b *onPremisesBootstrap) e return nil } -func reconcileNodesResources(ctx context.Context, b *onPremisesBootstrap) error { +func reconcileNodesResources(ctx context.Context, b *onPremisesBootstrap, reservedCIDR string) error { for i, node := range b.ClusterStatus.DataCentres[0].Nodes { nodeOSDiskSize, err := resource.ParseQuantity(b.OnPremisesSpec.OSDiskSize) if err != nil { @@ -223,6 +236,11 @@ func reconcileNodesResources(ctx context.Context, b *onPremisesBootstrap) error nodeName := fmt.Sprintf("%s-%d-%s", models.NodeVMPrefix, i, strings.ToLower(b.K8sObject.GetName())) + ip, err := getAvailableIP(ctx, reservedCIDR, b.K8sClient) + if err != nil { + return err + } + nodeVM := 
&virtcorev1.VirtualMachine{} err = b.K8sClient.Get(ctx, types.NamespacedName{ Namespace: b.K8sObject.GetNamespace(), @@ -239,6 +257,7 @@ func reconcileNodesResources(ctx context.Context, b *onPremisesBootstrap) error node.ID, node.Rack, nodeOSDV.Name, + ip, nodeCPU, nodeMemory, nodeDataDV.Name, @@ -301,6 +320,170 @@ func reconcileNodesResources(ctx context.Context, b *onPremisesBootstrap) error return nil } +func reconcileIPReservations(ctx context.Context, k8sclient client.Client) (string, error) { + ipReservationsList := &apicalico.IPReservationList{} + err := k8sclient.List(ctx, ipReservationsList, &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{ + models.ControlledByLabel: models.OperatorLabel, + }), + }) + if err != nil { + return "", err + } + + for _, reservation := range ipReservationsList.Items { + return reservation.Spec.ReservedCIDRs[0], nil + } + + ipPoolList := &apicalico.IPPoolList{} + err = k8sclient.List(ctx, ipPoolList, &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(map[string]string{ + models.ControlledByLabel: models.OperatorLabel, + }), + }) + if err != nil { + return "", err + } + + if len(ipPoolList.Items) == 0 { + clusterIPPoolList := &apicalico.IPPoolList{} + err = k8sclient.List(ctx, clusterIPPoolList) + if err != nil { + return "", err + } + + var usedCIDRs []string + for _, pool := range clusterIPPoolList.Items { + usedCIDRs = append(usedCIDRs, pool.Spec.CIDR) + } + + newIPPool := apicalico.NewIPPool() + newIPPool.ObjectMeta.Name = models.IPPoolName + newIPPool.ObjectMeta.Labels = map[string]string{ + models.ControlledByLabel: models.OperatorLabel, + } + availCIDR, err := getAvailablePrivateCIDR(usedCIDRs) + if err != nil { + return "", err + } + + newIPPool.Spec.CIDR = availCIDR + + err = k8sclient.Create(ctx, newIPPool) + if err != nil { + return "", err + } + + ipPoolList.Items = append(ipPoolList.Items, *newIPPool) + } + + newReservation := &apicalico.IPReservation{ + TypeMeta: metav1.TypeMeta{ + Kind: apicalico.KindIPReservation, + APIVersion: apicalico.VersionCurrent, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: models.IPReservationName, + Labels: map[string]string{ + models.ControlledByLabel: models.OperatorLabel, + }, + }, + Spec: apicalico.IPReservationSpec{ + ReservedCIDRs: []string{ipPoolList.Items[0].Spec.CIDR}, + }, + } + + err = k8sclient.Create(ctx, newReservation) + if err != nil { + return "", err + } + + return newReservation.Spec.ReservedCIDRs[0], nil +} + +// Calculates available CIDR from 10.*.*.* range with mask /24 +func getAvailablePrivateCIDR(unavailCIDRs []string) (string, error) { + for second := 0; second < 255; second++ { + for third := 0; third < 255; third++ { + checkNet := net.IPNet{ + IP: net.ParseIP(fmt.Sprintf("10.%d.%d.0", second, third)), + Mask: net.CIDRMask(24, 32), + } + cidrAvail := true + for _, unavailCIDR := range unavailCIDRs { + ip, _, err := net.ParseCIDR(unavailCIDR) + if err != nil { + return "", err + } + + if checkNet.Contains(ip) { + cidrAvail = false + break + } + } + + if cidrAvail { + return checkNet.String(), nil + } + } + } + + return "", models.ErrNoAvailableCIDRsForIPPool +} + +// Checks available IPs, fetching pods, VMs and checking their IPs. 
Assumes /24 ipmask +func getAvailableIP(ctx context.Context, cidr string, k8sclient client.Client) (string, error) { + ip, _, err := net.ParseCIDR(cidr) + if err != nil { + return "", err + } + + podList := &k8scorev1.PodList{} + err = k8sclient.List(ctx, podList) + if err != nil { + return "", err + } + + vmList := &virtcorev1.VirtualMachineList{} + err = k8sclient.List(ctx, vmList) + if err != nil { + return "", err + } + + ipStrings := strings.Split(ip.String(), ".") + for i := 1; i < 255; i++ { + checkIP := fmt.Sprintf("%s.%s.%s.%d", + ipStrings[0], + ipStrings[1], + ipStrings[2], + i) + + ipAvail := true + ipAnnot := fmt.Sprintf("[\"%s\"]", checkIP) + for _, pod := range podList.Items { + if pod.Annotations[models.IPAddrsAnnotation] == ipAnnot { + ipAvail = false + } + } + + if !ipAvail { + continue + } + + for _, vm := range vmList.Items { + if vm.Annotations[models.IPAddrsAnnotation] == ipAnnot { + ipAvail = false + } + } + + if ipAvail { + return checkIP, nil + } + } + + return "", models.ErrNoAvailableIPsInReservation +} + func createDV( ctx context.Context, b *onPremisesBootstrap, @@ -385,7 +568,8 @@ func newVM( vmName, nodeID, nodeRack, - OSDiskDVName string, + OSDiskDVName, + ip string, cpu, memory resource.Quantity, storageDVNames ...string, @@ -413,15 +597,19 @@ func newVM( labelSet[models.NodeLabel] = models.WorkerNode } + ipAnnot := fmt.Sprintf("[\"%s\"]", ip) vm := &virtcorev1.VirtualMachine{ TypeMeta: metav1.TypeMeta{ Kind: models.VirtualMachineKind, APIVersion: models.KubevirtV1APIVersion, }, ObjectMeta: metav1.ObjectMeta{ - Name: vmName, - Namespace: b.K8sObject.GetNamespace(), - Labels: labelSet, + Name: vmName, + Namespace: b.K8sObject.GetNamespace(), + Labels: labelSet, + Annotations: map[string]string{ + models.IPAddrsAnnotation: ipAnnot, + }, Finalizers: []string{models.DeletionFinalizer}, }, Spec: virtcorev1.VirtualMachineSpec{ @@ -429,6 +617,9 @@ func newVM( Template: &virtcorev1.VirtualMachineInstanceTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: labelSet, + Annotations: map[string]string{ + models.IPAddrsAnnotation: ipAnnot, + }, }, Spec: virtcorev1.VirtualMachineInstanceSpec{ Hostname: vmName, diff --git a/main.go b/main.go index c29ce497b..9a0d4a3ea 100644 --- a/main.go +++ b/main.go @@ -18,6 +18,7 @@ package main import ( "flag" + apicalico "github.com/projectcalico/api/pkg/apis/projectcalico/v3" "os" "time" @@ -57,6 +58,7 @@ func init() { utilruntime.Must(kafkamanagementv1beta1.AddToScheme(scheme)) utilruntime.Must(cdiv1beta1.AddToScheme(scheme)) utilruntime.Must(virtcorev1.AddToScheme(scheme)) + utilruntime.Must(apicalico.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme } diff --git a/pkg/models/errors.go b/pkg/models/errors.go index f57193624..894fe8676 100644 --- a/pkg/models/errors.go +++ b/pkg/models/errors.go @@ -68,4 +68,6 @@ var ( ErrExposeServiceEndpointsNotCreatedYet = errors.New("expose service endpoints is not created yet") ErrOnlySingleConcurrentResizeAvailable = errors.New("only single concurrent resize is allowed") ErrBundledUseOnlyResourceUpdateIsNotSupported = errors.New("updating of bundled use resource is not supported") + ErrNoAvailableCIDRsForIPPool = errors.New("no available CIDRs for IPPool") + ErrNoAvailableIPsInReservation = errors.New("no available IPs in IPreservation") ) diff --git a/pkg/models/on_premises.go b/pkg/models/on_premises.go index 3d0dbd278..fb4228a09 100644 --- a/pkg/models/on_premises.go +++ b/pkg/models/on_premises.go @@ -15,6 +15,7 @@ const ( NodeIDLabel = "nodeID" NodeRackLabel = "nodeRack" NodeLabel = 
"node" + IPAddrsAnnotation = "cni.projectcalico.org/ipAddrs" NodeOSDVPrefix = "node-os-data-volume-pvc" NodeDVPrefix = "node-data-volume-pvc" NodeVMPrefix = "node-vm" @@ -26,6 +27,8 @@ const ( GatewayRack = "ssh-gateway-rack" IgnitionScriptSecretPrefix = "ignition-script-secret" DataDisk = "data-disk" + IPReservationName = "instaclustr-operator-ip-reservation" + IPPoolName = "instaclustr-operator-ip-pool" Boot = "boot" Storage = "storage" diff --git a/pkg/models/operator.go b/pkg/models/operator.go index 399d61c49..42b4aded2 100644 --- a/pkg/models/operator.go +++ b/pkg/models/operator.go @@ -39,6 +39,7 @@ const ( RedisUserNamespaceLabel = "instaclustr.com/redisUserNamespace" PostgreSQLUserNamespaceLabel = "instaclustr.com/postgresqlUserNamespace" OpenSearchUserNamespaceLabel = "instaclustr.com/openSearchUserNamespace" + OperatorLabel = "instaclustr-operator" CassandraKind = "Cassandra" CassandraChildPrefix = "cassandra-"