From 95030c70e246a5926aaee4204e19abf7fa8ac0d5 Mon Sep 17 00:00:00 2001 From: Mathew Merrick Date: Tue, 19 Mar 2024 19:34:00 +0000 Subject: [PATCH 1/2] init --- Makefile | 2 +- go.mod | 49 ++- go.sum | 158 +++++++- test/e2e/README.md | 19 + .../azure/create-cluster-with-npm.go | 141 +++++++ test/e2e/framework/azure/create-cluster.go | 115 ++++++ test/e2e/framework/azure/create-rg.go | 48 +++ test/e2e/framework/azure/create-vnet.go | 110 ++++++ test/e2e/framework/azure/delete-cluster.go | 48 +++ test/e2e/framework/azure/delete-rg.go | 46 +++ test/e2e/framework/azure/enable-ama.go | 117 ++++++ test/e2e/framework/azure/get-kubeconfig.go | 53 +++ test/e2e/framework/generic/load-tag.go | 48 +++ .../kubernetes/create-agnhost-statefulset.go | 157 ++++++++ .../kubernetes/create-kapinger-deployment.go | 256 +++++++++++++ .../kubernetes/create-network-policy.go | 110 ++++++ .../framework/kubernetes/create-resource.go | 225 +++++++++++ .../framework/kubernetes/delete-resource.go | 353 ++++++++++++++++++ test/e2e/framework/kubernetes/exec-pod.go | 89 +++++ test/e2e/framework/kubernetes/get-logs.go | 57 +++ .../kubernetes/install-retina-helm.go | 116 ++++++ test/e2e/framework/kubernetes/port-forward.go | 168 +++++++++ test/e2e/framework/kubernetes/portforward.go | 196 ++++++++++ .../framework/kubernetes/wait-pod-ready.go | 62 +++ test/e2e/framework/prometheus/prometheus.go | 103 +++++ test/e2e/framework/types/background_test.go | 96 +++++ test/e2e/framework/types/job.go | 320 ++++++++++++++++ test/e2e/framework/types/jobvalues.go | 54 +++ test/e2e/framework/types/runner.go | 28 ++ test/e2e/framework/types/scenarios_test.go | 126 +++++++ test/e2e/framework/types/step.go | 41 ++ test/e2e/framework/types/step_sleep.go | 24 ++ test/e2e/framework/types/step_stop.go | 30 ++ test/e2e/scenarios/retina/drop/scenario.go | 107 ++++++ .../retina/drop/validate-drop-metric.go | 58 +++ .../scenarios/retina/retina_scenarios_test.go | 81 ++++ test/e2e/scenarios/retina/tcp/scenario.go | 91 +++++ 
.../retina/tcp/validate-flow-metric.go | 50 +++ .../tcp/validate-tcp-connection-remote.go | 45 +++ test/retry/retry.go | 39 ++ 40 files changed, 4025 insertions(+), 11 deletions(-) create mode 100644 test/e2e/README.md create mode 100644 test/e2e/framework/azure/create-cluster-with-npm.go create mode 100644 test/e2e/framework/azure/create-cluster.go create mode 100644 test/e2e/framework/azure/create-rg.go create mode 100644 test/e2e/framework/azure/create-vnet.go create mode 100644 test/e2e/framework/azure/delete-cluster.go create mode 100644 test/e2e/framework/azure/delete-rg.go create mode 100644 test/e2e/framework/azure/enable-ama.go create mode 100644 test/e2e/framework/azure/get-kubeconfig.go create mode 100644 test/e2e/framework/generic/load-tag.go create mode 100644 test/e2e/framework/kubernetes/create-agnhost-statefulset.go create mode 100644 test/e2e/framework/kubernetes/create-kapinger-deployment.go create mode 100644 test/e2e/framework/kubernetes/create-network-policy.go create mode 100644 test/e2e/framework/kubernetes/create-resource.go create mode 100644 test/e2e/framework/kubernetes/delete-resource.go create mode 100644 test/e2e/framework/kubernetes/exec-pod.go create mode 100644 test/e2e/framework/kubernetes/get-logs.go create mode 100644 test/e2e/framework/kubernetes/install-retina-helm.go create mode 100644 test/e2e/framework/kubernetes/port-forward.go create mode 100644 test/e2e/framework/kubernetes/portforward.go create mode 100644 test/e2e/framework/kubernetes/wait-pod-ready.go create mode 100644 test/e2e/framework/prometheus/prometheus.go create mode 100644 test/e2e/framework/types/background_test.go create mode 100644 test/e2e/framework/types/job.go create mode 100644 test/e2e/framework/types/jobvalues.go create mode 100644 test/e2e/framework/types/runner.go create mode 100644 test/e2e/framework/types/scenarios_test.go create mode 100644 test/e2e/framework/types/step.go create mode 100644 test/e2e/framework/types/step_sleep.go create mode 
100644 test/e2e/framework/types/step_stop.go create mode 100644 test/e2e/scenarios/retina/drop/scenario.go create mode 100644 test/e2e/scenarios/retina/drop/validate-drop-metric.go create mode 100644 test/e2e/scenarios/retina/retina_scenarios_test.go create mode 100644 test/e2e/scenarios/retina/tcp/scenario.go create mode 100644 test/e2e/scenarios/retina/tcp/validate-flow-metric.go create mode 100644 test/e2e/scenarios/retina/tcp/validate-tcp-connection-remote.go create mode 100644 test/retry/retry.go diff --git a/Makefile b/Makefile index 357bd76f37..0df2f17579 100644 --- a/Makefile +++ b/Makefile @@ -368,7 +368,7 @@ COVER_PKG ?= . test: $(ENVTEST) # Run unit tests. go build -o test-summary ./test/utsummary/main.go - CGO_ENABLED=0 KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use -p path)" go test -tags=unit -coverprofile=coverage.out -v -json ./... | ./test-summary --progress --verbose + CGO_ENABLED=0 KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use -p path)" go test -tags=unit -skip=TestE2E* -coverprofile=coverage.out -v -json ./... | ./test-summary --progress --verbose coverage: # Code coverage. # go generate ./... && go test -tags=unit -coverprofile=coverage.out.tmp ./... 
diff --git a/go.mod b/go.mod index 0fbd513f08..201128663e 100644 --- a/go.mod +++ b/go.mod @@ -15,8 +15,7 @@ require ( code.cloudfoundry.org/clock v1.0.0 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.29 // indirect @@ -24,7 +23,13 @@ require ( github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 // indirect + github.com/BurntSushi/toml v1.3.2 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.2.1 // indirect + github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/Masterminds/squirrel v1.5.4 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect @@ -44,20 +49,27 @@ require ( github.com/containerd/typeurl/v2 v2.1.1 // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.5.0 // indirect + github.com/docker/cli v25.0.3+incompatible // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect 
github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker-credential-helpers v0.8.0 // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect + github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect github.com/evanphx/json-patch v5.7.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.7.0 // indirect + github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/fatih/color v1.15.0 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-errors/errors v1.4.2 // indirect + github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.2.3 // indirect github.com/go-ole/go-ole v1.2.6 // indirect @@ -72,16 +84,20 @@ require ( github.com/go-openapi/swag v0.22.4 // indirect github.com/go-openapi/validate v0.22.3 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/gobwas/glob v0.2.3 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gofrs/uuid v4.2.0+incompatible // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/golang-jwt/jwt/v5 v5.0.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/btree v1.1.2 // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect + github.com/gosuri/uitable v0.0.4 // indirect 
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/hashicorp/consul/api v1.26.1 // indirect @@ -95,24 +111,33 @@ require ( github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hashicorp/hcl v1.0.1-vault-5 // indirect github.com/hashicorp/serf v0.10.1 // indirect + github.com/huandu/xstrings v1.4.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jmoiron/sqlx v1.3.5 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/josharian/native v1.1.0 // indirect github.com/klauspost/compress v1.17.3 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect + github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect + github.com/lib/pq v1.10.9 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mattn/go-runewidth v0.0.9 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/mdlayher/netlink v1.7.2 // indirect github.com/mdlayher/socket v0.4.1 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/locker v1.0.1 // indirect github.com/moby/moby v25.0.3+incompatible // indirect github.com/moby/spdystream v0.2.0 // indirect @@ -134,17 +159,19 @@ require ( 
github.com/pelletier/go-toml/v2 v2.1.0 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/prometheus/common v0.45.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/rogpeppe/go-internal v1.11.0 // indirect + github.com/rubenv/sql-migrate v1.5.2 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/s3rj1k/go-fanotify/fanotify v0.0.0-20210917134616-9c00a300bb7a // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sasha-s/go-deadlock v0.3.1 // indirect github.com/shirou/gopsutil/v3 v3.23.2 // indirect + github.com/shopspring/decimal v1.3.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.11.0 // indirect @@ -154,6 +181,9 @@ require ( github.com/tklauser/go-sysconf v0.3.11 // indirect github.com/tklauser/numcpus v0.7.0 // indirect github.com/vishvananda/netns v0.0.4 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xlab/treeprint v1.2.0 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect go.etcd.io/etcd/api/v3 v3.5.11 // indirect @@ -179,8 +209,10 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect + k8s.io/apiserver v0.29.3 // indirect k8s.io/component-base v0.29.3 // indirect k8s.io/cri-api v0.29.2 // indirect + oras.land/oras-go v1.2.4 // 
indirect sigs.k8s.io/kustomize/api v0.14.0 // indirect sigs.k8s.io/kustomize/kyaml v0.14.3 // indirect ) @@ -199,7 +231,7 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/testify v1.8.4 go.uber.org/multierr v1.11.0 // indirect - golang.org/x/net v0.20.0 // indirect + golang.org/x/net v0.21.0 // indirect golang.org/x/oauth2 v0.16.0 // indirect golang.org/x/sync v0.6.0 golang.org/x/sys v0.17.0 @@ -221,6 +253,13 @@ require ( require ( github.com/Azure/azure-container-networking/zapai v0.0.3 github.com/Azure/azure-sdk-for-go v68.0.0+incompatible + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.7.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dashboard/armdashboard v1.2.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/monitor/armmonitor v0.11.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v5 v5.1.0 + github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 github.com/Microsoft/hcsshim v0.12.0-rc.3 github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5 @@ -237,6 +276,7 @@ require ( github.com/onsi/gomega v1.32.0 github.com/pkg/errors v0.9.1 github.com/prometheus/client_model v0.5.0 + github.com/prometheus/common v0.45.0 github.com/safchain/ethtool v0.3.0 github.com/spf13/viper v1.18.2 github.com/vishvananda/netlink v1.2.1-beta.2.0.20231127184239-0ced8385386a @@ -246,6 +286,7 @@ require ( gopkg.in/natefinch/lumberjack.v2 v2.2.1 gotest.tools v2.2.0+incompatible gotest.tools/v3 v3.5.1 + helm.sh/helm/v3 v3.14.3 k8s.io/apiextensions-apiserver v0.29.3 k8s.io/cli-runtime v0.29.3 k8s.io/kubectl v0.29.3 diff --git a/go.sum b/go.sum index b75e93d360..c19a309b5e 100644 --- a/go.sum +++ b/go.sum @@ -10,12 +10,26 @@ 
github.com/Azure/azure-container-networking/zapai v0.0.3 h1:73druF1cnne5Ign/ztiX github.com/Azure/azure-container-networking/zapai v0.0.3/go.mod h1:XV/aKJQAV6KqV4HQtZlDyxg2z7LaY9rsX8dqwyWFmUI= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 h1:c4k2FIYIh4xtwqrQwV0Ct1v5+ehlNXj5NI/MWVsiTkQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2/go.mod h1:5FDJtLEO/GxwNgUxbwrY3LP0pEoThTQJtk2oysdXHxM= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.7.0 h1:g65N4m1sAjm0BkjIJYtp5qnJlkoFtd6oqfa27KO9fI4= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4 v4.7.0/go.mod h1:noQIdW75SiQFB3mSFJBr4iRRH83S9skaFiBv4C0uEs0= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dashboard/armdashboard v1.2.0 h1:MRPU8Bge2f9tkfG3PCr4vEnqXl8XOSjlhuK3l+8Hvkc= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dashboard/armdashboard v1.2.0/go.mod 
h1:xYrOYxajQvXMlp6M1E3amlaqPDXspyJxmjqTsGo6Jmw= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0 h1:PTFGRSlMKCQelWwxUyYVEUqseBJVemLyqWJjvMyt0do= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/internal/v2 v2.0.0/go.mod h1:LRr2FzBTQlONPPa5HREE5+RjSCTXl7BwOvYOaWTqCaI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0 h1:pPvTJ1dY0sA35JOeFq6TsY2xj6Z85Yo23Pj4wCCvu4o= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/managementgroups/armmanagementgroups v1.0.0/go.mod h1:mLfWfj8v3jfWKsL9G4eoBoXVcsqcIUTapmdKy7uGOp0= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/monitor/armmonitor v0.11.0 h1:Ds0KRF8ggpEGg4Vo42oX1cIt/IfOhHWJBikksZbVxeg= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/monitor/armmonitor v0.11.0/go.mod h1:jj6P8ybImR+5topJ+eH6fgcemSFBmU6/6bFF8KkwuDI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v5 v5.1.0 h1:0hndC8rxv2LXHWLNMVMjQrAuOyxNNMEGKDlKCSy9TsE= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v5 v5.1.0/go.mod h1:N10BjwUyNXtQz7WY6UoQqgli5dG1EtaHiZh8Q8DCfmg= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 h1:Dd+RhdJn0OTtVGaeDLZpcumkIVCtA/3/Fo42+eoYvVM= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0/go.mod h1:5kakwfW5CjC9KK+Q4wjXAg+ShuIm2mBMua0ZFj2C8PE= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 h1:AMf7YbZOZIW5b66cXNHMWWT/zkjhz5+a+k/3x40EO7E= @@ -43,13 +57,28 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw72xHJc34BNNykqSOeEJDAWkhf0u12/Jk= 
github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= +github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= +github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Microsoft/hcsshim v0.12.0-rc.3 h1:5GNGrobGs/sN/0nFO21W9k4lFn+iXXZAE8fCZbmdRak= github.com/Microsoft/hcsshim v0.12.0-rc.3/go.mod 
h1:WuNfcaYNaw+KpCEsZCIM6HCEmu0c5HfXpi+dDSmveP0= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -73,6 +102,14 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5 
h1:BjkPE3785EwPhhyuFkbINB+2a1xATwk8SNDWnJiD41g= github.com/cakturk/go-netstat v0.0.0-20200220111822-e5b49efee7a5/go.mod h1:jtAfVaU/2cu1+wdSRPWE2c1N2qeAA3K4RH9pYgqwets= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= @@ -129,22 +166,36 @@ github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 h1:aBfCb7iqHmDEIp6fBvC/hQUddQfg+3qdYjwzaiP9Hnc= +github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2/go.mod h1:WHNsWjnIn2V1LYOrME7e8KxSeKunYHsxEm4am0BUtcI= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docker/cli v25.0.3+incompatible h1:KLeNs7zws74oFuVhgZQ5ONGZiXUUdgsdy6/EsX/6284= +github.com/docker/cli v25.0.3+incompatible/go.mod 
h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8= +github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -157,6 +208,8 @@ github.com/evanphx/json-patch 
v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc= github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= @@ -166,6 +219,8 @@ github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBd github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/florianl/go-tc v0.4.3 h1:xpobG2gFNvEqbclU07zjddALSjqTQTWJkxg5/kRYDpw= github.com/florianl/go-tc v0.4.3/go.mod h1:uvp6pIlOw7Z8hhfnT5M4+V1hHVgZWRZwwMS8Z0JsRxc= +github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= +github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= @@ -181,6 +236,8 @@ github.com/go-chi/chi/v5 v5.0.11 h1:BnpYbFZ3T3S1WMpD79r7R5ThWX40TaFB7L31Y8xqSwA= github.com/go-chi/chi/v5 v5.0.11/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod 
h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= +github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= @@ -228,10 +285,20 @@ github.com/go-openapi/validate v0.22.3 h1:KxG9mu5HBRYbecRb37KRCihvGGtND2aXziBAv0 github.com/go-openapi/validate v0.22.3/go.mod h1:kVxh31KbfsxU8ZyoHaDbLBWU5CnMdqBUEtadQ2G4d5M= github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/gobuffalo/logger v1.0.6 h1:nnZNpxYo0zx+Aj9RfMPBm+x9zAU2OayFh/xrAWi34HU= +github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs= +github.com/gobuffalo/packd v1.0.1 h1:U2wXfRr4E9DH8IdsDLlRFwTZTK7hLfq9qT/QHXGVe/0= +github.com/gobuffalo/packd v1.0.1/go.mod h1:PP2POP3p3RXGz7Jh6eYEf93S7vA2za6xM7QT85L4+VY= +github.com/gobuffalo/packr/v2 v2.8.3 h1:xE1yzvnO56cUC0sTpKR3DIbxZgB54AftTFMhB2XEWlY= +github.com/gobuffalo/packr/v2 v2.8.3/go.mod h1:0SahksCVcx4IMnigTjiFuyldmTrdTctXsOdiU5KwbKc= 
+github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -269,6 +336,8 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= +github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= @@ -302,9 +371,15 @@ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.4.2/go.mod 
h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY= +github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= @@ -361,7 +436,11 @@ github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4 github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= +github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -372,6 +451,8 @@ github.com/inspektor-gadget/netns v0.0.5-0.20230524185006-155d84c555d6 h1:fQqkJ+ github.com/inspektor-gadget/netns 
v0.0.5-0.20230524185006-155d84c555d6/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/jellydator/ttlcache/v3 v3.1.1 h1:RCgYJqo3jgvhl+fEWvjNW8thxGWsgxi+TPhRir1Y9y8= github.com/jellydator/ttlcache/v3 v3.1.1/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= +github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= +github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/josharian/native v0.0.0-20200817173448-b6b71def0850/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= @@ -389,12 +470,15 @@ github.com/jsimonetti/rtnetlink v0.0.0-20210525051524-4cc836578190/go.mod h1:NmK github.com/jsimonetti/rtnetlink v0.0.0-20211022192332-93da33804786 h1:N527AHMa793TP5z5GNAn/VLPzlc0ewzWdeP/25gDfgQ= github.com/jsimonetti/rtnetlink v0.0.0-20211022192332-93da33804786/go.mod h1:v4hqbTdfQngbVSZJVWUhGE/lbTFf9jb+ygmNUDQMuOs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jsternberg/zap-logfmt v1.3.0 h1:z1n1AOHVVydOOVuyphbOKyR4NICDQFiJMn1IK5hVQ5Y= github.com/jsternberg/zap-logfmt v1.3.0/go.mod h1:N3DENp9WNmCZxvkBD/eReWwz1149BK6jEN9cQ4fNwZE= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw= +github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= 
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= @@ -413,6 +497,13 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= @@ -424,6 +515,12 @@ github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod 
h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/markbates/errx v1.1.0 h1:QDFeR+UP95dO12JgW+tgi2UVfo0V8YBHiUIOaeBPiEI= +github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc= +github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY= +github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI= +github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -439,6 +536,11 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= +github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= @@ -467,6 
+569,9 @@ github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKju github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= @@ -476,6 +581,9 @@ github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/moby v25.0.3+incompatible h1:Uzxm7JQOHBY8kZY2fa95a9kg0aTOt1cBidSZ+LXCxC4= @@ -547,6 +655,8 @@ github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+v github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod 
h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -561,8 +671,11 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= +github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= github.com/prometheus/client_golang v1.18.0/go.mod 
h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= @@ -573,11 +686,13 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= @@ -585,6 +700,8 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rubenv/sql-migrate v1.5.2 h1:bMDqOnrJVV/6JQgQ/MxOpU+AdO8uzYYA/TxFUBzFtS0= +github.com/rubenv/sql-migrate v1.5.2/go.mod h1:H38GW8Vqf8F0Su5XignRyaRcbXbJunSWxs+kmzlg0Is= github.com/russross/blackfriday/v2 v2.1.0 
h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -604,6 +721,9 @@ github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shirou/gopsutil/v3 v3.23.2 h1:PAWSuiAszn7IhPMBtXsbSCafej7PqUOvY6YywlQUExU= github.com/shirou/gopsutil/v3 v3.23.2/go.mod h1:gv0aQw33GLo3pG8SiWKiQrbDzbRY1K80RyZJ7V4Th1M= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= @@ -612,6 +732,7 @@ github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9yS github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= @@ -657,6 +778,13 @@ github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23n github.com/xdg-go/scram v1.1.2/go.mod 
h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= @@ -667,6 +795,12 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5t github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 
h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= go.etcd.io/etcd/api/v3 v3.5.11 h1:B54KwXbWDHyD3XYAwprxNzTe7vlhR69LuBgZnMVvS7E= go.etcd.io/etcd/api/v3 v3.5.11/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4= go.etcd.io/etcd/client/pkg/v3 v3.5.11 h1:bT2xVspdiCj2910T0V+/KHcVKjkUrCZVtk8J2JF2z1A= @@ -718,6 +852,7 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= @@ -765,9 +900,10 @@ golang.org/x/net v0.0.0-20210928044308-7d9f5e0b762b/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod 
h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= @@ -792,6 +928,7 @@ golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -825,6 +962,7 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -848,6 +986,7 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= @@ -857,6 +996,7 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= @@ -953,6 +1093,8 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.5.1 
h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +helm.sh/helm/v3 v3.14.3 h1:HmvRJlwyyt9HjgmAuxHbHv3PhMz9ir/XNWHyXfmnOP4= +helm.sh/helm/v3 v3.14.3/go.mod h1:v6myVbyseSBJTzhmeE39UcPLNv6cQK6qss3dvgAySaE= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= @@ -961,6 +1103,8 @@ k8s.io/apiextensions-apiserver v0.29.3 h1:9HF+EtZaVpFjStakF4yVufnXGPRppWFEQ87qnO k8s.io/apiextensions-apiserver v0.29.3/go.mod h1:po0XiY5scnpJfFizNGo6puNU6Fq6D70UJY2Cb2KwAVc= k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= +k8s.io/apiserver v0.29.3 h1:xR7ELlJ/BZSr2n4CnD3lfA4gzFivh0wwfNfz9L0WZcE= +k8s.io/apiserver v0.29.3/go.mod h1:hrvXlwfRulbMbBgmWRQlFru2b/JySDpmzvQwwk4GUOs= k8s.io/cli-runtime v0.29.3 h1:r68rephmmytoywkw2MyJ+CxjpasJDQY7AGc3XY2iv1k= k8s.io/cli-runtime v0.29.3/go.mod h1:aqVUsk86/RhaGJwDhHXH0jcdqBrgdF3bZWk4Z9D4mkM= k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= @@ -977,6 +1121,8 @@ k8s.io/kubectl v0.29.3 h1:RuwyyIU42MAISRIePaa8Q7A3U74Q9P4MoJbDFz9o3us= k8s.io/kubectl v0.29.3/go.mod h1:yCxfY1dbwgVdEt2zkJ6d5NNLOhhWgTyrqACIoFhpdd4= k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI= k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +oras.land/oras-go v1.2.4 h1:djpBY2/2Cs1PV87GSJlxv4voajVOMZxqqtq9AB8YNvY= +oras.land/oras-go v1.2.4/go.mod h1:DYcGfb3YF1nKjcezfX2SNlDAeQFKSXmf+qrFmrh4324= sigs.k8s.io/controller-runtime v0.13.1-0.20230315234915-a26de2d610c3 h1:fic0YtUGSr79nv8vn3ziNZJrPZsm64KT/Fd/bc7Q6xY= sigs.k8s.io/controller-runtime 
v0.13.1-0.20230315234915-a26de2d610c3/go.mod h1:Qox07m8Gh7skSeOfppEWllPxNMhA7+b93D8Qjj6rBlQ= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/test/e2e/README.md b/test/e2e/README.md new file mode 100644 index 0000000000..06a4581b36 --- /dev/null +++ b/test/e2e/README.md @@ -0,0 +1,19 @@ +# Retina E2E + +## Objectives + +- Steps are reusable +- Steps parameters are saved to the context of the job +- Once written to the job context, the values are immutable +- Cluster resources used in code should be able to be generated to yaml for easy manual repro +- Avoid shell/ps calls wherever possible and use go libraries for typed parameters (avoid capturing error codes/stderr/stdout) + +--- + +## Starter Example + +When authoring tests, make sure to prefix the test name with `TestE2E` so that it is skipped by existing pipeline unit test framework. +For reference, see the `test-all` recipe in the root [Makefile](../../Makefile). + +For sample test, please check out: +[the Retina E2E.](./scenarios/retina/drop/scenario.go) diff --git a/test/e2e/framework/azure/create-cluster-with-npm.go b/test/e2e/framework/azure/create-cluster-with-npm.go new file mode 100644 index 0000000000..4c2c62609d --- /dev/null +++ b/test/e2e/framework/azure/create-cluster-with-npm.go @@ -0,0 +1,141 @@ +package azure + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + armcontainerservice "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4" +) + +var ( + ErrResourceNameTooLong = fmt.Errorf("resource name too long") + ErrEmptyFile = fmt.Errorf("empty file") +) + +const ( + clusterTimeout = 10 * time.Minute + AgentARMSKU = "Standard_D4pls_v5" + AuxilaryNodeCount = 1 +) + +type CreateNPMCluster struct { + SubscriptionID string + 
ResourceGroupName string + Location string + ClusterName string + VnetName string + SubnetName string + PodCidr string + DNSServiceIP string + ServiceCidr string +} + +func (c *CreateNPMCluster) Prevalidate() error { + return nil +} + +func (c *CreateNPMCluster) Stop() error { + return nil +} + +func (c *CreateNPMCluster) Run() error { + // Start with default cluster template + npmCluster := GetStarterClusterTemplate(c.Location) + + npmCluster.Properties.NetworkProfile.NetworkPolicy = to.Ptr(armcontainerservice.NetworkPolicyAzure) + + npmCluster.Properties.AgentPoolProfiles = append(npmCluster.Properties.AgentPoolProfiles, &armcontainerservice.ManagedClusterAgentPoolProfile{ + Type: to.Ptr(armcontainerservice.AgentPoolTypeVirtualMachineScaleSets), + AvailabilityZones: []*string{to.Ptr("1")}, + Count: to.Ptr[int32](AuxilaryNodeCount), + EnableNodePublicIP: to.Ptr(false), + Mode: to.Ptr(armcontainerservice.AgentPoolModeUser), + OSType: to.Ptr(armcontainerservice.OSTypeWindows), + OSSKU: to.Ptr(armcontainerservice.OSSKUWindows2022), + ScaleDownMode: to.Ptr(armcontainerservice.ScaleDownModeDelete), + VMSize: to.Ptr(AgentSKU), + Name: to.Ptr("ws22"), + MaxPods: to.Ptr(int32(MaxPodsPerNode)), + }) + + /* todo: add azlinux node pool + npmCluster.Properties.AgentPoolProfiles = append(npmCluster.Properties.AgentPoolProfiles, &armcontainerservice.ManagedClusterAgentPoolProfile{ + Type: to.Ptr(armcontainerservice.AgentPoolTypeVirtualMachineScaleSets), + AvailabilityZones: []*string{to.Ptr("1")}, + Count: to.Ptr[int32](AuxilaryNodeCount), + EnableNodePublicIP: to.Ptr(false), + Mode: to.Ptr(armcontainerservice.AgentPoolModeUser), + OSType: to.Ptr(armcontainerservice.OSTypeLinux), + OSSKU: to.Ptr(armcontainerservice.OSSKUAzureLinux), + ScaleDownMode: to.Ptr(armcontainerservice.ScaleDownModeDelete), + VMSize: to.Ptr(azure.AgentSKU), + Name: to.Ptr("azlinux"), + MaxPods: to.Ptr(int32(azure.MaxPodsPerNode)), + }) + */ + npmCluster.Properties.AgentPoolProfiles = 
append(npmCluster.Properties.AgentPoolProfiles, &armcontainerservice.ManagedClusterAgentPoolProfile{ + Type: to.Ptr(armcontainerservice.AgentPoolTypeVirtualMachineScaleSets), + AvailabilityZones: []*string{to.Ptr("1")}, + Count: to.Ptr[int32](AuxilaryNodeCount), + EnableNodePublicIP: to.Ptr(false), + Mode: to.Ptr(armcontainerservice.AgentPoolModeUser), + OSType: to.Ptr(armcontainerservice.OSTypeLinux), + ScaleDownMode: to.Ptr(armcontainerservice.ScaleDownModeDelete), + VMSize: to.Ptr(AgentARMSKU), + Name: to.Ptr("arm64"), + MaxPods: to.Ptr(int32(MaxPodsPerNode)), + }) + + // Deploy cluster + cred, err := azidentity.NewAzureCLICredential(nil) + if err != nil { + return fmt.Errorf("failed to obtain a credential: %w", err) + } + ctx, cancel := context.WithTimeout(context.Background(), clusterTimeout) + defer cancel() + + clientFactory, err := armcontainerservice.NewClientFactory(c.SubscriptionID, cred, nil) + if err != nil { + return fmt.Errorf("failed to create az client: %w", err) + } + + log.Printf("when the cluster is ready, use the below command to access and debug") + log.Printf("az aks get-credentials --resource-group %s --name %s --subscription %s", c.ResourceGroupName, c.ClusterName, c.SubscriptionID) + log.Printf("creating cluster \"%s\" in resource group \"%s\"...", c.ClusterName, c.ResourceGroupName) + + poller, err := clientFactory.NewManagedClustersClient().BeginCreateOrUpdate(ctx, c.ResourceGroupName, c.ClusterName, npmCluster, nil) + if err != nil { + return fmt.Errorf("failed to finish the create cluster request: %w", err) + } + + notifychan := make(chan struct{}) + go func() { + _, err = poller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{ + Frequency: 5 * time.Second, + }) + if err != nil { + log.Printf("failed to create cluster: %v\n", err) + } else { + log.Printf("cluster %s is ready\n", c.ClusterName) + } + close(notifychan) + }() + + ticker := time.NewTicker(30 * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + 
return fmt.Errorf("failed to create cluster: %w", ctx.Err()) + case <-ticker.C: + log.Printf("waiting for cluster %s to be ready...\n", c.ClusterName) + case <-notifychan: + return err + } + } +} diff --git a/test/e2e/framework/azure/create-cluster.go b/test/e2e/framework/azure/create-cluster.go new file mode 100644 index 0000000000..4a39d34eb8 --- /dev/null +++ b/test/e2e/framework/azure/create-cluster.go @@ -0,0 +1,115 @@ +package azure + +import ( + "context" + "fmt" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + armcontainerservice "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4" +) + +const ( + MaxNumberOfNodes = 3 + MaxPodsPerNode = 250 + AgentSKU = "Standard_DS4_v2" +) + +var defaultClusterCreateTimeout = 30 * time.Minute + +type CreateCluster struct { + SubscriptionID string + ResourceGroupName string + Location string + ClusterName string +} + +func (c *CreateCluster) Run() error { + cred, err := azidentity.NewAzureCLICredential(nil) + if err != nil { + return fmt.Errorf("failed to obtain a credential: %w", err) + } + ctx, cancel := context.WithTimeout(context.Background(), defaultClusterCreateTimeout) + defer cancel() + clientFactory, err := armcontainerservice.NewClientFactory(c.SubscriptionID, cred, nil) + if err != nil { + return fmt.Errorf("failed to create client: %w", err) + } + + poller, err := clientFactory.NewManagedClustersClient().BeginCreateOrUpdate(ctx, c.ResourceGroupName, c.ClusterName, GetStarterClusterTemplate(c.Location), nil) + if err != nil { + return fmt.Errorf("failed to finish the create cluster request: %w", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + return fmt.Errorf("failed to pull the create cluster result: %w", err) + } + + return nil +} + +func GetStarterClusterTemplate(location string) armcontainerservice.ManagedCluster { + id := 
armcontainerservice.ResourceIdentityTypeSystemAssigned + return armcontainerservice.ManagedCluster{ + Location: to.Ptr(location), + Tags: map[string]*string{ + "archv2": to.Ptr(""), + "tier": to.Ptr("production"), + }, + Properties: &armcontainerservice.ManagedClusterProperties{ + AddonProfiles: map[string]*armcontainerservice.ManagedClusterAddonProfile{}, + /* Moving this to a separate stage to enable AMA since it takes some time to provision + AzureMonitorProfile: &armcontainerservice.ManagedClusterAzureMonitorProfile{ + Metrics: &armcontainerservice.ManagedClusterAzureMonitorProfileMetrics{ + Enabled: to.Ptr(true), + }, + }, + */ + AgentPoolProfiles: []*armcontainerservice.ManagedClusterAgentPoolProfile{ + { + Type: to.Ptr(armcontainerservice.AgentPoolTypeVirtualMachineScaleSets), + AvailabilityZones: []*string{to.Ptr("1")}, + Count: to.Ptr[int32](MaxNumberOfNodes), + EnableNodePublicIP: to.Ptr(false), + Mode: to.Ptr(armcontainerservice.AgentPoolModeSystem), + OSType: to.Ptr(armcontainerservice.OSTypeLinux), + ScaleDownMode: to.Ptr(armcontainerservice.ScaleDownModeDelete), + VMSize: to.Ptr(AgentSKU), + Name: to.Ptr("nodepool1"), + MaxPods: to.Ptr(int32(MaxPodsPerNode)), + }, + }, + KubernetesVersion: to.Ptr(""), + DNSPrefix: to.Ptr("dnsprefix1"), + EnablePodSecurityPolicy: to.Ptr(false), + EnableRBAC: to.Ptr(true), + LinuxProfile: nil, + NetworkProfile: &armcontainerservice.NetworkProfile{ + LoadBalancerSKU: to.Ptr(armcontainerservice.LoadBalancerSKUStandard), + OutboundType: to.Ptr(armcontainerservice.OutboundTypeLoadBalancer), + NetworkPlugin: to.Ptr(armcontainerservice.NetworkPluginAzure), + }, + WindowsProfile: &armcontainerservice.ManagedClusterWindowsProfile{ + AdminPassword: to.Ptr("replacePassword1234$"), + AdminUsername: to.Ptr("azureuser"), + }, + }, + Identity: &armcontainerservice.ManagedClusterIdentity{ + Type: &id, + }, + + SKU: &armcontainerservice.ManagedClusterSKU{ + Name: to.Ptr(armcontainerservice.ManagedClusterSKUName("Base")), + Tier: 
to.Ptr(armcontainerservice.ManagedClusterSKUTierStandard), + }, + } +} + +func (c *CreateCluster) Prevalidate() error { + return nil +} + +func (c *CreateCluster) Stop() error { + return nil +} diff --git a/test/e2e/framework/azure/create-rg.go b/test/e2e/framework/azure/create-rg.go new file mode 100644 index 0000000000..190d67ec13 --- /dev/null +++ b/test/e2e/framework/azure/create-rg.go @@ -0,0 +1,48 @@ +package azure + +import ( + "context" + "fmt" + "log" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" +) + +type CreateResourceGroup struct { + SubscriptionID string + ResourceGroupName string + Location string +} + +func (c *CreateResourceGroup) Run() error { + cred, err := azidentity.NewAzureCLICredential(nil) + if err != nil { + return fmt.Errorf("failed to obtain a credential: %w", err) + } + ctx := context.Background() + clientFactory, err := armresources.NewClientFactory(c.SubscriptionID, cred, nil) + if err != nil { + return fmt.Errorf("failed to create resource group client: %w", err) + } + log.Printf("creating resource group %s in location %s...", c.ResourceGroupName, c.Location) + + _, err = clientFactory.NewResourceGroupsClient().CreateOrUpdate(ctx, c.ResourceGroupName, armresources.ResourceGroup{ + Location: to.Ptr(c.Location), + }, nil) + if err != nil { + return fmt.Errorf("failed to finish the request: %w", err) + } + + log.Printf("resource group created %s in location %s", c.ResourceGroupName, c.Location) + return nil +} + +func (c *CreateResourceGroup) Prevalidate() error { + return nil +} + +func (c *CreateResourceGroup) Stop() error { + return nil +} diff --git a/test/e2e/framework/azure/create-vnet.go b/test/e2e/framework/azure/create-vnet.go new file mode 100644 index 0000000000..c439b74793 --- /dev/null +++ b/test/e2e/framework/azure/create-vnet.go @@ -0,0 +1,110 @@ +package azure + +import ( + 
"context" + "fmt" + "log" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + armnetwork "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v5" +) + +const FlowTimeoutInMinutes = 10 + +type CreateVNet struct { + SubscriptionID string + ResourceGroupName string + Location string + VnetName string + VnetAddressSpace string +} + +func (c *CreateVNet) Run() error { + cred, err := azidentity.NewAzureCLICredential(nil) + if err != nil { + return fmt.Errorf("failed to obtain a credential: %w", err) + } + ctx := context.Background() + clientFactory, err := armnetwork.NewClientFactory(c.SubscriptionID, cred, nil) + if err != nil { + return fmt.Errorf("failed to create client: %w", err) + } + + log.Printf("creating vnet \"%s\" in resource group \"%s\"...", c.VnetName, c.ResourceGroupName) + + poller, err := clientFactory.NewVirtualNetworksClient().BeginCreateOrUpdate(ctx, c.ResourceGroupName, c.VnetName, armnetwork.VirtualNetwork{ + Location: to.Ptr(c.Location), + Properties: &armnetwork.VirtualNetworkPropertiesFormat{ + AddressSpace: &armnetwork.AddressSpace{ + AddressPrefixes: []*string{ + to.Ptr(c.VnetAddressSpace), + }, + }, + FlowTimeoutInMinutes: to.Ptr[int32](FlowTimeoutInMinutes), + }, + }, nil) + if err != nil { + return fmt.Errorf("failed to finish the request for create vnet: %w", err) + } + + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + return fmt.Errorf("failed to pull the result for create vnet: %w", err) + } + return nil +} + +func (c *CreateVNet) Prevalidate() error { + return nil +} + +func (c *CreateVNet) Stop() error { + return nil +} + +type CreateSubnet struct { + SubscriptionID string + ResourceGroupName string + Location string + VnetName string + SubnetName string + SubnetAddressSpace string +} + +func (c *CreateSubnet) Run() error { + cred, err := azidentity.NewAzureCLICredential(nil) + if err != nil { + return fmt.Errorf("failed to obtain a credential: 
%w", err) + } + ctx := context.Background() + clientFactory, err := armnetwork.NewClientFactory(c.SubscriptionID, cred, nil) + if err != nil { + return fmt.Errorf("failed to create client: %w", err) + } + + log.Printf("creating subnet \"%s\" in vnet \"%s\" in resource group \"%s\"...", c.SubnetName, c.VnetName, c.ResourceGroupName) + + poller, err := clientFactory.NewSubnetsClient().BeginCreateOrUpdate(ctx, c.ResourceGroupName, c.VnetName, c.SubnetName, armnetwork.Subnet{ + Properties: &armnetwork.SubnetPropertiesFormat{ + AddressPrefix: to.Ptr(c.SubnetAddressSpace), + }, + }, nil) + if err != nil { + return fmt.Errorf("failed to finish the request for create subnet: %w", err) + } + + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + return fmt.Errorf("failed to pull the result for create subnet: %w", err) + } + return nil +} + +func (c *CreateSubnet) Prevalidate() error { + return nil +} + +func (c *CreateSubnet) Stop() error { + return nil +} diff --git a/test/e2e/framework/azure/delete-cluster.go b/test/e2e/framework/azure/delete-cluster.go new file mode 100644 index 0000000000..c53435d0d1 --- /dev/null +++ b/test/e2e/framework/azure/delete-cluster.go @@ -0,0 +1,48 @@ +package azure + +import ( + "context" + "fmt" + "log" + + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + armcontainerservice "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4" +) + +type DeleteCluster struct { + ClusterName string + SubscriptionID string + ResourceGroupName string + Location string +} + +func (d *DeleteCluster) Run() error { + cred, err := azidentity.NewAzureCLICredential(nil) + if err != nil { + return fmt.Errorf("failed to obtain a credential: %w", err) + } + ctx := context.Background() + clientFactory, err := armcontainerservice.NewClientFactory(d.SubscriptionID, cred, nil) + if err != nil { + return fmt.Errorf("failed to create client: %w", err) + } + + log.Printf("deleting cluster %s in resource group %s...", 
d.ClusterName, d.ResourceGroupName) + poller, err := clientFactory.NewManagedClustersClient().BeginDelete(ctx, d.ResourceGroupName, d.ClusterName, nil) + if err != nil { + return fmt.Errorf("failed to finish the request: %w", err) + } + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + return fmt.Errorf("failed to pull the result: %w", err) + } + return nil +} + +func (d *DeleteCluster) Prevalidate() error { + return nil +} + +func (d *DeleteCluster) Stop() error { + return nil +} diff --git a/test/e2e/framework/azure/delete-rg.go b/test/e2e/framework/azure/delete-rg.go new file mode 100644 index 0000000000..9691f0fe4e --- /dev/null +++ b/test/e2e/framework/azure/delete-rg.go @@ -0,0 +1,46 @@ +package azure + +import ( + "context" + "fmt" + "log" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" +) + +type DeleteResourceGroup struct { + SubscriptionID string + ResourceGroupName string + Location string +} + +func (d *DeleteResourceGroup) Run() error { + log.Printf("deleting resource group \"%s\"...", d.ResourceGroupName) + cred, err := azidentity.NewAzureCLICredential(nil) + if err != nil { + return fmt.Errorf("failed to obtain a credential: %w", err) + } + ctx := context.Background() + clientFactory, err := armresources.NewClientFactory(d.SubscriptionID, cred, nil) + if err != nil { + return fmt.Errorf("failed to create resource group client: %w", err) + } + forceDeleteType := "Microsoft.Compute/virtualMachines,Microsoft.Compute/virtualMachineScaleSets" + _, err = clientFactory.NewResourceGroupsClient().BeginDelete(ctx, d.ResourceGroupName, &armresources.ResourceGroupsClientBeginDeleteOptions{ForceDeletionTypes: to.Ptr(forceDeleteType)}) + if err != nil { + return fmt.Errorf("failed to finish the delete resource group request: %w", err) + } + + log.Printf("resource group \"%s\" deleted successfully", 
d.ResourceGroupName) + return nil +} + +func (d *DeleteResourceGroup) Prevalidate() error { + return nil +} + +func (d *DeleteResourceGroup) Stop() error { + return nil +} diff --git a/test/e2e/framework/azure/enable-ama.go b/test/e2e/framework/azure/enable-ama.go new file mode 100644 index 0000000000..5dcd89ed5a --- /dev/null +++ b/test/e2e/framework/azure/enable-ama.go @@ -0,0 +1,117 @@ +package azure + +import ( + "context" + "encoding/json" + "fmt" + "log" + "os" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + armcontainerservice "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/dashboard/armdashboard" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/monitor/armmonitor" +) + +const fileperms = 0o600 + +type CreateAzureMonitor struct { + SubscriptionID string + ResourceGroupName string + Location string + ClusterName string +} + +func (c *CreateAzureMonitor) Run() error { + log.Printf(`this will deploy azure monitor workspace and grafana, but as of 1/9/2024, the api docs don't show how to do +az aks update --enable-azure-monitor-metrics \ +-n $NAME \ +-g $CLUSTER_RESOURCE_GROUP \ +--azure-monitor-workspace-resource-id $AZMON_RESOURCE_ID \ +--grafana-resource-id $GRAFANA_RESOURCE_ID +`) + + cred, err := azidentity.NewAzureCLICredential(nil) + if err != nil { + return fmt.Errorf("failed to obtain a credential: %w", err) + } + + ctx := context.Background() + amaClientFactory, err := armmonitor.NewClientFactory(c.SubscriptionID, cred, nil) + if err != nil { + return fmt.Errorf("failed to create azure monitor workspace client: %w", err) + } + log.Printf("creating resource group %s in location %s...", c.ResourceGroupName, c.Location) + + // create azure monitor + _, err = amaClientFactory.NewAzureMonitorWorkspacesClient().Create(ctx, c.ResourceGroupName, "test", 
armmonitor.AzureMonitorWorkspaceResource{ + Location: &c.Location, + }, &armmonitor.AzureMonitorWorkspacesClientCreateOptions{}) + if err != nil { + return fmt.Errorf("failed to azure monitor workspace: %w", err) + } + + // Create grafana + + granafaClientFactory, err := armdashboard.NewClientFactory(c.SubscriptionID, cred, nil) + if err != nil { + return fmt.Errorf("failed to create grafana client: %w", err) + } + + _, err = granafaClientFactory.NewGrafanaClient().BeginCreate(ctx, c.ResourceGroupName, "test", armdashboard.ManagedGrafana{}, &armdashboard.GrafanaClientBeginCreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create grafana: %w", err) + } + + log.Printf("azure monitor workspace %s in location %s", c.ResourceGroupName, c.Location) + + // update aks cluster + + ctx, cancel := context.WithTimeout(context.Background(), defaultClusterCreateTimeout) + defer cancel() + aksClientFactory, err := armcontainerservice.NewClientFactory(c.SubscriptionID, cred, nil) + if err != nil { + return fmt.Errorf("failed to create client: %w", err) + } + + cluster, err := aksClientFactory.NewManagedClustersClient().Get(ctx, c.ResourceGroupName, c.ClusterName, nil) + if err != nil { + return fmt.Errorf("failed to get cluster to enable AMA: %w", err) + } + + // enable Azure Monitor Metrics + cluster.Properties.AzureMonitorProfile.Metrics.Enabled = to.Ptr(true) + + // Marshal the struct into a JSON byte array with indentation + jsonData, err := json.MarshalIndent(cluster, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal cluster to JSON for AMA: %w", err) + } + + // Write the JSON data to a file + err = os.WriteFile("cluster.json", jsonData, fileperms) + if err != nil { + return fmt.Errorf("failed to write cluster JSON to file for AMA: %w", err) + } + + poller, err := aksClientFactory.NewManagedClustersClient().BeginCreateOrUpdate(ctx, c.ResourceGroupName, c.ClusterName, GetStarterClusterTemplate(c.Location), nil) + if err != nil { + return 
fmt.Errorf("failed to finish the update cluster request for AMA: %w", err) + } + + _, err = poller.PollUntilDone(ctx, nil) + if err != nil { + return fmt.Errorf("failed to enable AMA on cluster %s: %w", *cluster.Name, err) + } + + return nil +} + +func (c *CreateAzureMonitor) Prevalidate() error { + return nil +} + +func (c *CreateAzureMonitor) Stop() error { + return nil +} diff --git a/test/e2e/framework/azure/get-kubeconfig.go b/test/e2e/framework/azure/get-kubeconfig.go new file mode 100644 index 0000000000..254eb87d94 --- /dev/null +++ b/test/e2e/framework/azure/get-kubeconfig.go @@ -0,0 +1,53 @@ +package azure + +import ( + "context" + "fmt" + "log" + "os" + + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + armcontainerservice "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4" +) + +const KubeConfigPerms = 0o600 + +type GetAKSKubeConfig struct { + ClusterName string + SubscriptionID string + ResourceGroupName string + Location string + KubeConfigFilePath string +} + +func (c *GetAKSKubeConfig) Run() error { + cred, err := azidentity.NewAzureCLICredential(nil) + if err != nil { + return fmt.Errorf("failed to obtain a credential: %w", err) + } + ctx := context.Background() + clientFactory, err := armcontainerservice.NewClientFactory(c.SubscriptionID, cred, nil) + if err != nil { + return fmt.Errorf("failed to create client: %w", err) + } + res, err := clientFactory.NewManagedClustersClient().ListClusterUserCredentials(ctx, c.ResourceGroupName, c.ClusterName, nil) + if err != nil { + return fmt.Errorf("failed to finish the get managed cluster client request: %w", err) + } + + err = os.WriteFile(c.KubeConfigFilePath, []byte(res.Kubeconfigs[0].Value), KubeConfigPerms) + if err != nil { + return fmt.Errorf("failed to write kubeconfig to file \"%s\": %w", c.KubeConfigFilePath, err) + } + + log.Printf("kubeconfig for cluster \"%s\" in resource group \"%s\" written to \"%s\"\n", c.ClusterName, c.ResourceGroupName, 
// DefaultTagEnv is the environment variable consulted for the image tag.
const DefaultTagEnv = "TAG"

// LoadTag resolves the tag used by the e2e run (e.g. a docker image tag) from
// the TagEnv environment variable, falling back to an already-registered and
// parsed -tag command-line flag.
type LoadTag struct {
	TagEnv string
}

// Run logs the tag currently present in the environment.
func (s *LoadTag) Run() error {
	tag := os.Getenv(s.TagEnv)
	log.Printf("tag is %s\n", tag)
	return nil
}

// Prevalidate ensures a usable tag is available before the job starts.
//
// fix: the original declared a fresh local `tag` that shadowed the env value,
// registered a -tag flag and immediately checked the (always empty) local
// without ever calling flag.Parse — so the flag fallback could never succeed,
// the trailing `return nil` was unreachable, and a second Prevalidate call
// would panic on duplicate flag registration. Consult an already-registered,
// parsed -tag flag via flag.Lookup instead.
func (s *LoadTag) Prevalidate() error {
	if tag := os.Getenv(s.TagEnv); tag != "" {
		log.Printf("tag is %s", tag)
		return nil
	}
	log.Printf("tag is not set from env %s", s.TagEnv)

	if f := flag.Lookup("tag"); f != nil && f.Value.String() != "" {
		tag := f.Value.String()
		log.Printf("using version \"%s\" from flag", tag)
		os.Setenv(s.TagEnv, tag)
		return nil
	}
	return fmt.Errorf("tag is not set from flag nor env %s", s.TagEnv)
}

func (s *LoadTag) Postvalidate() error {
	return nil
}

func (s *LoadTag) Stop() error {
	return nil
}
AgnhostName string + AgnhostNamespace string + KubeConfigFilePath string +} + +func (c *CreateAgnhostStatefulSet) Run() error { + config, err := clientcmd.BuildConfigFromFlags("", c.KubeConfigFilePath) + if err != nil { + return fmt.Errorf("error building kubeconfig: %w", err) + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return fmt.Errorf("error creating Kubernetes client: %w", err) + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultTimeoutSeconds*time.Second) + defer cancel() + + agnhostStatefulest := c.getAgnhostDeployment() + + err = CreateResource(ctx, agnhostStatefulest, clientset) + if err != nil { + return fmt.Errorf("error agnhost component: %w", err) + } + + selector, exists := agnhostStatefulest.Spec.Selector.MatchLabels["app"] + if !exists { + return fmt.Errorf("missing label \"app=%s\" from agnhost statefulset: %w", c.AgnhostName, ErrLabelMissingFromPod) + } + + labelSelector := fmt.Sprintf("app=%s", selector) + err = WaitForPodReady(ctx, clientset, c.AgnhostNamespace, labelSelector) + if err != nil { + return fmt.Errorf("error waiting for agnhost pod to be ready: %w", err) + } + + return nil +} + +func (c *CreateAgnhostStatefulSet) Prevalidate() error { + return nil +} + +func (c *CreateAgnhostStatefulSet) Stop() error { + return nil +} + +func (c *CreateAgnhostStatefulSet) getAgnhostDeployment() *appsv1.StatefulSet { + reps := int32(AgnhostReplicas) + + return &appsv1.StatefulSet{ + TypeMeta: metaV1.TypeMeta{ + Kind: "Deployment", + APIVersion: "apps/v1", + }, + ObjectMeta: metaV1.ObjectMeta{ + Name: c.AgnhostName, + Namespace: c.AgnhostNamespace, + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: &reps, + Selector: &metaV1.LabelSelector{ + MatchLabels: map[string]string{ + "app": c.AgnhostName, + "k8s-app": "agnhost", + }, + }, + Template: v1.PodTemplateSpec{ + ObjectMeta: metaV1.ObjectMeta{ + Labels: map[string]string{ + "app": c.AgnhostName, + "k8s-app": "agnhost", + }, + }, + + Spec: v1.PodSpec{ 
+ Affinity: &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + // prefer an even spread across the cluster to avoid scheduling on the same node + PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{ + { + Weight: MaxAffinityWeight, + PodAffinityTerm: v1.PodAffinityTerm{ + TopologyKey: "kubernetes.io/hostname", + LabelSelector: &metaV1.LabelSelector{ + MatchLabels: map[string]string{ + "k8s-app": "agnhost", + }, + }, + }, + }, + }, + }, + }, + NodeSelector: map[string]string{ + "kubernetes.io/os": "linux", + }, + Containers: []v1.Container{ + { + Name: c.AgnhostName, + Image: "acnpublic.azurecr.io/agnhost:2.40", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + "memory": resource.MustParse("20Mi"), + }, + Limits: v1.ResourceList{ + "memory": resource.MustParse("20Mi"), + }, + }, + Command: []string{ + "/agnhost", + }, + Args: []string{ + "serve-hostname", + "--http", + "--port", + strconv.Itoa(AgnhostHTTPPort), + }, + + Ports: []v1.ContainerPort{ + { + ContainerPort: AgnhostHTTPPort, + }, + }, + Env: []v1.EnvVar{}, + }, + }, + }, + }, + }, + } +} diff --git a/test/e2e/framework/kubernetes/create-kapinger-deployment.go b/test/e2e/framework/kubernetes/create-kapinger-deployment.go new file mode 100644 index 0000000000..60d082b7c3 --- /dev/null +++ b/test/e2e/framework/kubernetes/create-kapinger-deployment.go @@ -0,0 +1,256 @@ +package kubernetes + +import ( + "context" + "fmt" + "strconv" + + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/resource" + metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" +) + +const ( + KapingerHTTPPort = 8080 + KapingerTCPPort = 8085 + KapingerUDPPort = 8086 + MaxAffinityWeight = 100 +) + +type CreateKapingerDeployment struct { + KapingerNamespace string + KapingerReplicas string + 
KubeConfigFilePath string +} + +func (c *CreateKapingerDeployment) Run() error { + _, err := strconv.Atoi(c.KapingerReplicas) + if err != nil { + return fmt.Errorf("error converting replicas to int for Kapinger replicas: %w", err) + } + + config, err := clientcmd.BuildConfigFromFlags("", c.KubeConfigFilePath) + if err != nil { + return fmt.Errorf("error building kubeconfig: %w", err) + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return fmt.Errorf("error creating Kubernetes client: %w", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resources := []runtime.Object{ + c.GetKapingerService(), + c.GetKapingerServiceAccount(), + c.GetKapingerClusterRole(), + c.GetKapingerClusterRoleBinding(), + c.GetKapingerDeployment(), + } + + for i := range resources { + err = CreateResource(ctx, resources[i], clientset) + if err != nil { + return fmt.Errorf("error kapinger component: %w", err) + } + } + + return nil +} + +func (c *CreateKapingerDeployment) Prevalidate() error { + return nil +} + +func (c *CreateKapingerDeployment) Stop() error { + return nil +} + +func (c *CreateKapingerDeployment) GetKapingerDeployment() *appsv1.Deployment { + replicas, err := strconv.ParseInt(c.KapingerReplicas, 10, 32) + if err != nil { + fmt.Println("Error converting replicas to int for Kapinger replicas: ", err) + return nil + } + reps := int32(replicas) + + return &appsv1.Deployment{ + TypeMeta: metaV1.TypeMeta{ + Kind: "Deployment", + APIVersion: "apps/v1", + }, + ObjectMeta: metaV1.ObjectMeta{ + Name: "kapinger", + Namespace: c.KapingerNamespace, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &reps, + Selector: &metaV1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "kapinger", + }, + }, + Template: v1.PodTemplateSpec{ + ObjectMeta: metaV1.ObjectMeta{ + Labels: map[string]string{ + "app": "kapinger", + "server": "good", + }, + }, + + Spec: v1.PodSpec{ + Affinity: &v1.Affinity{ + PodAntiAffinity: 
&v1.PodAntiAffinity{ + // prefer an even spread across the cluster to avoid scheduling on the same node + PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{ + { + Weight: MaxAffinityWeight, + PodAffinityTerm: v1.PodAffinityTerm{ + TopologyKey: "kubernetes.io/hostname", + LabelSelector: &metaV1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "kapinger", + }, + }, + }, + }, + }, + }, + }, + ServiceAccountName: "kapinger-sa", + Containers: []v1.Container{ + { + Name: "kapinger", + Image: "acnpublic.azurecr.io/kapinger:be57650", + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + "memory": resource.MustParse("20Mi"), + }, + Limits: v1.ResourceList{ + "memory": resource.MustParse("20Mi"), + }, + }, + Ports: []v1.ContainerPort{ + { + ContainerPort: KapingerHTTPPort, + }, + }, + Env: []v1.EnvVar{ + { + Name: "TARGET_TYPE", + Value: "service", + }, + { + Name: "HTTP_PORT", + Value: strconv.Itoa(KapingerHTTPPort), + }, + { + Name: "TCP_PORT", + Value: strconv.Itoa(KapingerTCPPort), + }, + { + Name: "UDP_PORT", + Value: strconv.Itoa(KapingerUDPPort), + }, + }, + }, + }, + }, + }, + }, + } +} + +func (c *CreateKapingerDeployment) GetKapingerService() *v1.Service { + return &v1.Service{ + TypeMeta: metaV1.TypeMeta{ + Kind: "Service", + APIVersion: "v1", + }, + ObjectMeta: metaV1.ObjectMeta{ + Name: "kapinger-service", + Namespace: c.KapingerNamespace, + Labels: map[string]string{ + "app": "kapinger", + }, + }, + Spec: v1.ServiceSpec{ + Selector: map[string]string{ + "app": "kapinger", + }, + Ports: []v1.ServicePort{ + { + Port: KapingerHTTPPort, + Protocol: v1.ProtocolTCP, + TargetPort: intstr.FromInt(KapingerHTTPPort), + }, + }, + }, + } +} + +func (c *CreateKapingerDeployment) GetKapingerServiceAccount() *v1.ServiceAccount { + return &v1.ServiceAccount{ + TypeMeta: metaV1.TypeMeta{ + Kind: "ServiceAccount", + APIVersion: "v1", + }, + ObjectMeta: metaV1.ObjectMeta{ + Name: "kapinger-sa", + Namespace: c.KapingerNamespace, 
+ }, + } +} + +func (c *CreateKapingerDeployment) GetKapingerClusterRole() *rbacv1.ClusterRole { + return &rbacv1.ClusterRole{ + TypeMeta: metaV1.TypeMeta{ + Kind: "ClusterRole", + APIVersion: "rbac.authorization.k8s.io/v1", + }, + ObjectMeta: metaV1.ObjectMeta{ + Name: "kapinger-role", + Namespace: c.KapingerNamespace, + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"services", "pods"}, + Verbs: []string{"get", "list"}, + }, + }, + } +} + +func (c *CreateKapingerDeployment) GetKapingerClusterRoleBinding() *rbacv1.ClusterRoleBinding { + return &rbacv1.ClusterRoleBinding{ + TypeMeta: metaV1.TypeMeta{ + Kind: "ClusterRoleBinding", + APIVersion: "rbac.authorization.k8s.io/v1", + }, + ObjectMeta: metaV1.ObjectMeta{ + Name: "kapinger-rolebinding", + Namespace: c.KapingerNamespace, + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: "kapinger-sa", + Namespace: c.KapingerNamespace, + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: "kapinger-role", + }, + } +} diff --git a/test/e2e/framework/kubernetes/create-network-policy.go b/test/e2e/framework/kubernetes/create-network-policy.go new file mode 100644 index 0000000000..6c0cf41709 --- /dev/null +++ b/test/e2e/framework/kubernetes/create-network-policy.go @@ -0,0 +1,110 @@ +package kubernetes + +import ( + "context" + "fmt" + "strings" + + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" +) + +const ( + Egress = "egress" + Ingress = "ingress" +) + +type CreateDenyAllNetworkPolicy struct { + NetworkPolicyNamespace string + KubeConfigFilePath string + DenyAllLabelSelector string +} + +func (c *CreateDenyAllNetworkPolicy) Run() error { + config, err := clientcmd.BuildConfigFromFlags("", c.KubeConfigFilePath) + if err != nil { + return fmt.Errorf("error building kubeconfig: %w", err) + } + + clientset, 
err := kubernetes.NewForConfig(config) + if err != nil { + return fmt.Errorf("error creating Kubernetes client: %w", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + agnhostStatefulest := getNetworkPolicy(c.NetworkPolicyNamespace, c.DenyAllLabelSelector) + err = CreateResource(ctx, agnhostStatefulest, clientset) + if err != nil { + return fmt.Errorf("error creating simple deny-all network policy: %w", err) + } + + return nil +} + +func getNetworkPolicy(namespace, labelSelector string) *networkingv1.NetworkPolicy { + labelSelectorSlice := strings.Split(labelSelector, "=") + return &networkingv1.NetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deny-all", + Namespace: namespace, + }, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + labelSelectorSlice[0]: labelSelectorSlice[1], + }, + }, + PolicyTypes: []networkingv1.PolicyType{ + networkingv1.PolicyTypeIngress, + networkingv1.PolicyTypeEgress, + }, + Egress: []networkingv1.NetworkPolicyEgressRule{}, + Ingress: []networkingv1.NetworkPolicyIngressRule{}, + }, + } +} + +func (c *CreateDenyAllNetworkPolicy) Prevalidate() error { + return nil +} + +func (c *CreateDenyAllNetworkPolicy) Stop() error { + return nil +} + +type DeleteDenyAllNetworkPolicy struct { + NetworkPolicyNamespace string + KubeConfigFilePath string + DenyAllLabelSelector string +} + +func (d *DeleteDenyAllNetworkPolicy) Run() error { + config, err := clientcmd.BuildConfigFromFlags("", d.KubeConfigFilePath) + if err != nil { + return fmt.Errorf("error building kubeconfig: %w", err) + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return fmt.Errorf("error creating Kubernetes client: %w", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + agnhostStatefulest := getNetworkPolicy(d.NetworkPolicyNamespace, d.DenyAllLabelSelector) + err = DeleteResource(ctx, agnhostStatefulest, 
clientset) + if err != nil { + return fmt.Errorf("error creating simple deny-all network policy: %w", err) + } + + return nil +} + +func (d *DeleteDenyAllNetworkPolicy) Prevalidate() error { + return nil +} diff --git a/test/e2e/framework/kubernetes/create-resource.go b/test/e2e/framework/kubernetes/create-resource.go new file mode 100644 index 0000000000..d5b5974092 --- /dev/null +++ b/test/e2e/framework/kubernetes/create-resource.go @@ -0,0 +1,225 @@ +package kubernetes + +import ( + "context" + "fmt" + "log" + + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" +) + +var ( + ErrUnknownResourceType = fmt.Errorf("unknown resource type") + ErrCreateNilResource = fmt.Errorf("cannot create nil resource") +) + +func CreateResource(ctx context.Context, obj runtime.Object, clientset *kubernetes.Clientset) error { //nolint:gocyclo //this is just boilerplate code + if obj == nil { + return ErrCreateNilResource + } + + switch o := obj.(type) { + case *appsv1.DaemonSet: + log.Printf("Creating/Updating DaemonSet \"%s\" in namespace \"%s\"...\n", o.Name, o.Namespace) + client := clientset.AppsV1().DaemonSets(o.Namespace) + _, err := client.Get(ctx, o.Name, metaV1.GetOptions{}) + if errors.IsNotFound(err) { + _, err = client.Create(ctx, o, metaV1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create DaemonSet \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + return nil + } + _, err = client.Update(ctx, o, metaV1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to create/update DaemonSet \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + + case *appsv1.Deployment: + log.Printf("Creating/Updating Deployment \"%s\" in namespace \"%s\"...\n", o.Name, o.Namespace) + client := 
clientset.AppsV1().Deployments(o.Namespace) + _, err := client.Get(ctx, o.Name, metaV1.GetOptions{}) + if errors.IsNotFound(err) { + _, err = client.Create(ctx, o, metaV1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create Deployment \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + return nil + } + _, err = client.Update(ctx, o, metaV1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to create/update Deployment \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + + case *appsv1.StatefulSet: + log.Printf("Creating/Updating StatefulSet \"%s\" in namespace \"%s\"...\n", o.Name, o.Namespace) + client := clientset.AppsV1().StatefulSets(o.Namespace) + _, err := client.Get(ctx, o.Name, metaV1.GetOptions{}) + if errors.IsNotFound(err) { + _, err = client.Create(ctx, o, metaV1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create StatefulSet \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + return nil + } + _, err = client.Update(ctx, o, metaV1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to create/update StatefulSet \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + + case *v1.Service: + log.Printf("Creating/Updating Service \"%s\" in namespace \"%s\"...\n", o.Name, o.Namespace) + client := clientset.CoreV1().Services(o.Namespace) + _, err := client.Get(ctx, o.Name, metaV1.GetOptions{}) + if errors.IsNotFound(err) { + _, err = client.Create(ctx, o, metaV1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create Service \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + return nil + } + _, err = client.Update(ctx, o, metaV1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to create/update Service \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + + case *v1.ServiceAccount: + log.Printf("Creating/Updating ServiceAccount \"%s\" in namespace \"%s\"...\n", o.Name, o.Namespace) + client := 
clientset.CoreV1().ServiceAccounts(o.Namespace) + _, err := client.Get(ctx, o.Name, metaV1.GetOptions{}) + if errors.IsNotFound(err) { + _, err = client.Create(ctx, o, metaV1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create ServiceAccount \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + return nil + } + _, err = client.Update(ctx, o, metaV1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to create/update ServiceAccount \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + + case *rbacv1.Role: + log.Printf("Creating/Updating Role \"%s\" in namespace \"%s\"...\n", o.Name, o.Namespace) + client := clientset.RbacV1().Roles(o.Namespace) + _, err := client.Get(ctx, o.Name, metaV1.GetOptions{}) + if errors.IsNotFound(err) { + _, err = client.Create(ctx, o, metaV1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create Role \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + return nil + } + _, err = client.Update(ctx, o, metaV1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to create/update Role \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + + case *rbacv1.RoleBinding: + log.Printf("Creating/Updating RoleBinding \"%s\" in namespace \"%s\"...\n", o.Name, o.Namespace) + client := clientset.RbacV1().RoleBindings(o.Namespace) + _, err := client.Get(ctx, o.Name, metaV1.GetOptions{}) + if errors.IsNotFound(err) { + _, err = client.Create(ctx, o, metaV1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create RoleBinding \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + return nil + } + _, err = client.Update(ctx, o, metaV1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to create/update RoleBinding \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + + case *rbacv1.ClusterRole: + log.Printf("Creating/Updating ClusterRole \"%s\"...\n", o.Name) + client := clientset.RbacV1().ClusterRoles() + _, 
err := client.Get(ctx, o.Name, metaV1.GetOptions{}) + if errors.IsNotFound(err) { + _, err = client.Create(ctx, o, metaV1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create ClusterRole \"%s\": %w", o.Name, err) + } + return nil + } + _, err = client.Update(ctx, o, metaV1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to create/update ClusterRole \"%s\": %w", o.Name, err) + } + + case *rbacv1.ClusterRoleBinding: + log.Printf("Creating/Updating ClusterRoleBinding \"%s\"...\n", o.Name) + client := clientset.RbacV1().ClusterRoleBindings() + _, err := client.Get(ctx, o.Name, metaV1.GetOptions{}) + if errors.IsNotFound(err) { + _, err = client.Create(ctx, o, metaV1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create ClusterRoleBinding \"%s\": %w", o.Name, err) + } + return nil + } + _, err = client.Update(ctx, o, metaV1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to create/update ClusterRoleBinding \"%s\": %w", o.Name, err) + } + + case *v1.ConfigMap: + log.Printf("Creating/Updating ConfigMap \"%s\" in namespace \"%s\"...\n", o.Name, o.Namespace) + client := clientset.CoreV1().ConfigMaps(o.Namespace) + _, err := client.Get(ctx, o.Name, metaV1.GetOptions{}) + if errors.IsNotFound(err) { + _, err = client.Create(ctx, o, metaV1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create ConfigMap \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + return nil + } + _, err = client.Update(ctx, o, metaV1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to create/update ConfigMap \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + + case *networkingv1.NetworkPolicy: + log.Printf("Creating/Updating NetworkPolicy \"%s\" in namespace \"%s\"...\n", o.Name, o.Namespace) + client := clientset.NetworkingV1().NetworkPolicies(o.Namespace) + _, err := client.Get(ctx, o.Name, metaV1.GetOptions{}) + if errors.IsNotFound(err) { + _, err = client.Create(ctx, o, 
metaV1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create NetworkPolicy \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + return nil + } + _, err = client.Update(ctx, o, metaV1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to create/update NetworkPolicy \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + + case *v1.Secret: + log.Printf("Creating/Updating Secret \"%s\" in namespace \"%s\"...\n", o.Name, o.Namespace) + client := clientset.CoreV1().Secrets(o.Namespace) + _, err := client.Get(ctx, o.Name, metaV1.GetOptions{}) + if errors.IsNotFound(err) { + _, err = client.Create(ctx, o, metaV1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create Secret \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + return nil + } + _, err = client.Update(ctx, o, metaV1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to create/update Secret \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + + default: + return fmt.Errorf("unknown object type: %T, err: %w", obj, ErrUnknownResourceType) + } + return nil +} diff --git a/test/e2e/framework/kubernetes/delete-resource.go b/test/e2e/framework/kubernetes/delete-resource.go new file mode 100644 index 0000000000..5090d2e7c7 --- /dev/null +++ b/test/e2e/framework/kubernetes/delete-resource.go @@ -0,0 +1,353 @@ +package kubernetes + +import ( + "context" + "fmt" + "log" + "time" + + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" +) + +var ErrDeleteNilResource = fmt.Errorf("cannot delete nil resource") + +type ResourceType string + +const ( + DaemonSet ResourceType = "DaemonSet" + Deployment ResourceType = "Deployment" + StatefulSet ResourceType = 
"StatefulSet" + Service ResourceType = "Service" + ServiceAccount ResourceType = "ServiceAccount" + Role ResourceType = "Role" + RoleBinding ResourceType = "RoleBinding" + ClusterRole ResourceType = "ClusterRole" + ClusterRoleBinding ResourceType = "ClusterRoleBinding" + ConfigMap ResourceType = "ConfigMap" + NetworkPolicy ResourceType = "NetworkPolicy" + Secret ResourceType = "Secret" + Unknown ResourceType = "Unknown" +) + +// Parameters can only be strings, heres to help add guardrails +func TypeString(resourceType ResourceType) string { + ResourceTypes := map[ResourceType]string{ + DaemonSet: "DaemonSet", + Deployment: "Deployment", + StatefulSet: "StatefulSet", + Service: "Service", + ServiceAccount: "ServiceAccount", + Role: "Role", + RoleBinding: "RoleBinding", + ClusterRole: "ClusterRole", + ClusterRoleBinding: "ClusterRoleBinding", + ConfigMap: "ConfigMap", + NetworkPolicy: "NetworkPolicy", + Secret: "Secret", + Unknown: "Unknown", + } + str, ok := ResourceTypes[resourceType] + if !ok { + return ResourceTypes[Unknown] + } + return str +} + +type DeleteKubernetesResource struct { + ResourceType string // can't use enum, breaks parameter parsing, all must be strings + ResourceName string + ResourceNamespace string + KubeConfigFilePath string +} + +func (d *DeleteKubernetesResource) Run() error { + config, err := clientcmd.BuildConfigFromFlags("", d.KubeConfigFilePath) + if err != nil { + return fmt.Errorf("error building kubeconfig: %w", err) + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return fmt.Errorf("error creating Kubernetes client: %w", err) + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultTimeoutSeconds*time.Second) + defer cancel() + + res := ResourceType(d.ResourceType) + + var resource runtime.Object + + switch res { + case DaemonSet: + resource = &appsv1.DaemonSet{ + ObjectMeta: metaV1.ObjectMeta{ + Name: d.ResourceName, + Namespace: d.ResourceNamespace, + }, + } + case Deployment: + 
resource = &appsv1.Deployment{ + ObjectMeta: metaV1.ObjectMeta{ + Name: d.ResourceName, + Namespace: d.ResourceNamespace, + }, + } + case StatefulSet: + resource = &appsv1.StatefulSet{ + ObjectMeta: metaV1.ObjectMeta{ + Name: d.ResourceName, + Namespace: d.ResourceNamespace, + }, + } + case Service: + resource = &v1.Service{ + ObjectMeta: metaV1.ObjectMeta{ + Name: d.ResourceName, + Namespace: d.ResourceNamespace, + }, + } + case ServiceAccount: + resource = &v1.ServiceAccount{ + ObjectMeta: metaV1.ObjectMeta{ + Name: d.ResourceName, + Namespace: d.ResourceNamespace, + }, + } + case Role: + resource = &rbacv1.Role{ + ObjectMeta: metaV1.ObjectMeta{ + Name: d.ResourceName, + Namespace: d.ResourceNamespace, + }, + } + case RoleBinding: + resource = &rbacv1.RoleBinding{ + ObjectMeta: metaV1.ObjectMeta{ + Name: d.ResourceName, + Namespace: d.ResourceNamespace, + }, + } + case ClusterRole: + resource = &rbacv1.ClusterRole{ + ObjectMeta: metaV1.ObjectMeta{ + Name: d.ResourceName, + }, + } + case ClusterRoleBinding: + resource = &rbacv1.ClusterRoleBinding{ + ObjectMeta: metaV1.ObjectMeta{ + Name: d.ResourceName, + }, + } + case ConfigMap: + resource = &v1.ConfigMap{ + ObjectMeta: metaV1.ObjectMeta{ + Name: d.ResourceName, + Namespace: d.ResourceNamespace, + }, + } + case NetworkPolicy: + resource = &networkingv1.NetworkPolicy{ + ObjectMeta: metaV1.ObjectMeta{ + Name: d.ResourceName, + Namespace: d.ResourceNamespace, + }, + } + case Secret: + resource = &v1.Secret{ + ObjectMeta: metaV1.ObjectMeta{ + Name: d.ResourceName, + Namespace: d.ResourceNamespace, + }, + } + case Unknown: + return fmt.Errorf("unknown resource type: %s: %w", d.ResourceType, ErrUnknownResourceType) + default: + return ErrUnknownResourceType + } + + err = DeleteResource(ctx, resource, clientset) + if err != nil { + return fmt.Errorf("error deleting resource: %w", err) + } + + return nil +} + +func (d *DeleteKubernetesResource) Stop() error { + return nil +} + +func (d *DeleteKubernetesResource) 
Prevalidate() error { + restype := ResourceType(d.ResourceType) + if restype == Unknown { + return ErrUnknownResourceType + } + + return nil +} + +func DeleteResource(ctx context.Context, obj runtime.Object, clientset *kubernetes.Clientset) error { //nolint:gocyclo //this is just boilerplate code + if obj == nil { + return ErrDeleteNilResource + } + + switch o := obj.(type) { + case *appsv1.DaemonSet: + log.Printf("Deleting DaemonSet \"%s\" in namespace \"%s\"...\n", o.Name, o.Namespace) + client := clientset.AppsV1().DaemonSets(o.Namespace) + err := client.Delete(ctx, o.Name, metaV1.DeleteOptions{}) + if err != nil { + if errors.IsNotFound(err) { + log.Printf("DaemonSet \"%s\" in namespace \"%s\" does not exist\n", o.Name, o.Namespace) + return nil + } + return fmt.Errorf("failed to delete DaemonSet \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + + case *appsv1.Deployment: + log.Printf("Deleting Deployment \"%s\" in namespace \"%s\"...\n", o.Name, o.Namespace) + client := clientset.AppsV1().Deployments(o.Namespace) + err := client.Delete(ctx, o.Name, metaV1.DeleteOptions{}) + if err != nil { + if errors.IsNotFound(err) { + log.Printf("Deployment \"%s\" in namespace \"%s\" does not exist\n", o.Name, o.Namespace) + return nil + } + return fmt.Errorf("failed to delete Deployment \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + + case *appsv1.StatefulSet: + log.Printf("Deleting StatefulSet \"%s\" in namespace \"%s\"...\n", o.Name, o.Namespace) + client := clientset.AppsV1().StatefulSets(o.Namespace) + err := client.Delete(ctx, o.Name, metaV1.DeleteOptions{}) + if err != nil { + if errors.IsNotFound(err) { + log.Printf("StatefulSet \"%s\" in namespace \"%s\" does not exist\n", o.Name, o.Namespace) + return nil + } + return fmt.Errorf("failed to delete StatefulSet \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + + case *v1.Service: + log.Printf("Deleting Service \"%s\" in namespace \"%s\"...\n", o.Name, o.Namespace) + 
client := clientset.CoreV1().Services(o.Namespace) + err := client.Delete(ctx, o.Name, metaV1.DeleteOptions{}) + if err != nil { + if errors.IsNotFound(err) { + log.Printf("Service \"%s\" in namespace \"%s\" does not exist\n", o.Name, o.Namespace) + return nil + } + return fmt.Errorf("failed to delete Service \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + + case *v1.ServiceAccount: + log.Printf("Deleting ServiceAccount \"%s\" in namespace \"%s\"...\n", o.Name, o.Namespace) + client := clientset.CoreV1().ServiceAccounts(o.Namespace) + err := client.Delete(ctx, o.Name, metaV1.DeleteOptions{}) + if err != nil { + if errors.IsNotFound(err) { + log.Printf("ServiceAccount \"%s\" in namespace \"%s\" does not exist\n", o.Name, o.Namespace) + return nil + } + return fmt.Errorf("failed to delete ServiceAccount \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + + case *rbacv1.Role: + log.Printf("Deleting Role \"%s\" in namespace \"%s\"...\n", o.Name, o.Namespace) + client := clientset.RbacV1().Roles(o.Namespace) + err := client.Delete(ctx, o.Name, metaV1.DeleteOptions{}) + if err != nil { + if errors.IsNotFound(err) { + log.Printf("Role \"%s\" in namespace \"%s\" does not exist\n", o.Name, o.Namespace) + return nil + } + return fmt.Errorf("failed to delete Role \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + + case *rbacv1.RoleBinding: + log.Printf("Deleting RoleBinding \"%s\" in namespace \"%s\"...\n", o.Name, o.Namespace) + client := clientset.RbacV1().RoleBindings(o.Namespace) + err := client.Delete(ctx, o.Name, metaV1.DeleteOptions{}) + if err != nil { + if errors.IsNotFound(err) { + log.Printf("RoleBinding \"%s\" in namespace \"%s\" does not exist\n", o.Name, o.Namespace) + return nil + } + return fmt.Errorf("failed to delete RoleBinding \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + + case *rbacv1.ClusterRole: + log.Printf("Deleting ClusterRole \"%s\"...\n", o.Name) + client := 
clientset.RbacV1().ClusterRoles() + err := client.Delete(ctx, o.Name, metaV1.DeleteOptions{}) + if err != nil { + if errors.IsNotFound(err) { + log.Printf("ClusterRole \"%s\" in namespace \"%s\" does not exist\n", o.Name, o.Namespace) + return nil + } + return fmt.Errorf("failed to delete ClusterRole \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + + case *rbacv1.ClusterRoleBinding: + log.Printf("Deleting ClusterRoleBinding \"%s\"...\n", o.Name) + client := clientset.RbacV1().ClusterRoleBindings() + err := client.Delete(ctx, o.Name, metaV1.DeleteOptions{}) + if err != nil { + if errors.IsNotFound(err) { + log.Printf("ClusterRoleBinding \"%s\" in namespace \"%s\" does not exist\n", o.Name, o.Namespace) + return nil + } + return fmt.Errorf("failed to delete ClusterRoleBinding \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + + case *v1.ConfigMap: + log.Printf("Deleting ConfigMap \"%s\" in namespace \"%s\"...\n", o.Name, o.Namespace) + client := clientset.CoreV1().ConfigMaps(o.Namespace) + err := client.Delete(ctx, o.Name, metaV1.DeleteOptions{}) + if err != nil { + if errors.IsNotFound(err) { + log.Printf("ConfigMap \"%s\" in namespace \"%s\" does not exist\n", o.Name, o.Namespace) + return nil + } + return fmt.Errorf("failed to delete ConfigMap \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + + case *networkingv1.NetworkPolicy: + log.Printf("Deleting NetworkPolicy \"%s\" in namespace \"%s\"...\n", o.Name, o.Namespace) + client := clientset.NetworkingV1().NetworkPolicies(o.Namespace) + err := client.Delete(ctx, o.Name, metaV1.DeleteOptions{}) + if err != nil { + if errors.IsNotFound(err) { + log.Printf("NetworkPolicy \"%s\" in namespace \"%s\" does not exist\n", o.Name, o.Namespace) + return nil + } + return fmt.Errorf("failed to delete NetworkPolicy \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + + case *v1.Secret: + log.Printf("Deleting Secret \"%s\" in namespace \"%s\"...\n", o.Name, o.Namespace) + 
client := clientset.CoreV1().Secrets(o.Namespace) + err := client.Delete(ctx, o.Name, metaV1.DeleteOptions{}) + if err != nil { + if errors.IsNotFound(err) { + log.Printf("Secret \"%s\" in namespace \"%s\" does not exist\n", o.Name, o.Namespace) + return nil + } + return fmt.Errorf("failed to delete Secret \"%s\" in namespace \"%s\": %w", o.Name, o.Namespace, err) + } + + default: + return fmt.Errorf("unknown object type: %T, err: %w", obj, ErrUnknownResourceType) + } + return nil +} diff --git a/test/e2e/framework/kubernetes/exec-pod.go b/test/e2e/framework/kubernetes/exec-pod.go new file mode 100644 index 0000000000..456a43eb0f --- /dev/null +++ b/test/e2e/framework/kubernetes/exec-pod.go @@ -0,0 +1,89 @@ +package kubernetes + +import ( + "context" + "fmt" + "log" + "os" + "strings" + + v1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/remotecommand" + "k8s.io/kubectl/pkg/scheme" +) + +const ExecSubResources = "exec" + +type ExecInPod struct { + PodNamespace string + KubeConfigFilePath string + PodName string + Command string +} + +func (e *ExecInPod) Run() error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + err := ExecPod(ctx, e.KubeConfigFilePath, e.PodNamespace, e.PodName, e.Command) + if err != nil { + return fmt.Errorf("error executing command [%s]: %w", e.Command, err) + } + + return nil +} + +func (e *ExecInPod) Prevalidate() error { + return nil +} + +func (e *ExecInPod) Stop() error { + return nil +} + +func ExecPod(ctx context.Context, kubeConfigFilePath, namespace, podName, command string) error { + config, err := clientcmd.BuildConfigFromFlags("", kubeConfigFilePath) + if err != nil { + return fmt.Errorf("error building kubeconfig: %w", err) + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return fmt.Errorf("error creating Kubernetes client: %w", err) + } + + req := 
clientset.CoreV1().RESTClient().Post().Resource("pods").Name(podName). + Namespace(namespace).SubResource(ExecSubResources) + option := &v1.PodExecOptions{ + Command: strings.Fields(command), + Stdin: true, + Stdout: true, + Stderr: true, + TTY: false, + } + + req.VersionedParams( + option, + scheme.ParameterCodec, + ) + + exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL()) + if err != nil { + return fmt.Errorf("error creating executor: %w", err) + } + + log.Printf("executing command \"%s\" on pod \"%s\" in namespace \"%s\"...", command, podName, namespace) + err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{ + Stdin: os.Stdin, + Stdout: os.Stdout, + Stderr: os.Stderr, + }) + + if err != nil { + return fmt.Errorf("error executing command: %w", err) + } + + return nil +} diff --git a/test/e2e/framework/kubernetes/get-logs.go b/test/e2e/framework/kubernetes/get-logs.go new file mode 100644 index 0000000000..677eeb513b --- /dev/null +++ b/test/e2e/framework/kubernetes/get-logs.go @@ -0,0 +1,57 @@ +package kubernetes + +import ( + "context" + "fmt" + "io" + "log" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" +) + +func PrintPodLogs(kubeconfigpath string, namespace string, labelSelector string) { + // Load the kubeconfig file to get the configuration to access the cluster + config, err := clientcmd.BuildConfigFromFlags("", kubeconfigpath) + if err != nil { + log.Printf("error building kubeconfig: %s\n", err) + } + + // Create a new clientset to interact with the cluster + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + log.Printf("error creating clientset: %s\n", err) + } + + // List all the pods in the namespace + pods, err := clientset.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{ + LabelSelector: labelSelector, + }) + if err != nil { + log.Printf("error listing pods: %s\n", err) + } + + // 
Iterate over the pods and get the logs for each pod + for _, pod := range pods.Items { + fmt.Printf("############################## logs for pod %s #########################\n", pod.Name) + + // Get the logs for the pod + req := clientset.CoreV1().Pods(namespace).GetLogs(pod.Name, &corev1.PodLogOptions{}) + podLogs, err := req.Stream(context.Background()) + if err != nil { + fmt.Printf("error getting logs for pod %s: %s\n", pod.Name, err) + } + defer podLogs.Close() + + // Read the logs + buf, err := io.ReadAll(podLogs) + if err != nil { + log.Printf("error reading logs for pod %s: %s\n", pod.Name, err) + } + + // Print the logs + log.Println(string(buf)) + } +} diff --git a/test/e2e/framework/kubernetes/install-retina-helm.go b/test/e2e/framework/kubernetes/install-retina-helm.go new file mode 100644 index 0000000000..93798386dd --- /dev/null +++ b/test/e2e/framework/kubernetes/install-retina-helm.go @@ -0,0 +1,116 @@ +package kubernetes + +import ( + "fmt" + "log" + "os" + "strings" + "time" + + "helm.sh/helm/v3/pkg/action" + "helm.sh/helm/v3/pkg/chart/loader" + "helm.sh/helm/v3/pkg/cli" +) + +const ( + createTimeout = 240 * time.Second // windows is slow + deleteTimeout = 60 * time.Second +) + +var errEmptyTag = fmt.Errorf("tag is empty") + +type InstallHelmChart struct { + Namespace string + ReleaseName string + KubeConfigFilePath string + ChartPath string + TagEnv string +} + +func (i *InstallHelmChart) Run() error { + settings := cli.New() + settings.KubeConfig = i.KubeConfigFilePath + actionConfig := new(action.Configuration) + + err := actionConfig.Init(settings.RESTClientGetter(), i.Namespace, os.Getenv("HELM_DRIVER"), log.Printf) + if err != nil { + return fmt.Errorf("failed to initialize helm action config: %w", err) + } + + tag := os.Getenv(i.TagEnv) + + // load chart from the path + chart, err := loader.Load(i.ChartPath) + if err != nil { + return fmt.Errorf("failed to load chart from path %s: %w", i.ChartPath, err) + } + + 
chart.Values["imagePullSecrets"] = []map[string]interface{}{ + { + "name": "acr-credentials", + }, + } + + chart.Values["image"].(map[string]interface{})["tag"] = tag + chart.Values["image"].(map[string]interface{})["pullPolicy"] = "Always" + chart.Values["operator"].(map[string]interface{})["tag"] = tag + + getclient := action.NewGet(actionConfig) + release, err := getclient.Run(i.ReleaseName) + if err == nil && release != nil { + log.Printf("found existing release by same name, removing before installing %s", release.Name) + delclient := action.NewUninstall(actionConfig) + delclient.Wait = true + delclient.Timeout = deleteTimeout + _, err = delclient.Run(i.ReleaseName) + if err != nil { + return fmt.Errorf("failed to delete existing release %s: %w", i.ReleaseName, err) + } + } else if err != nil && !strings.Contains(err.Error(), "not found") { + return fmt.Errorf("failed to get release %s: %w", i.ReleaseName, err) + } + + client := action.NewInstall(actionConfig) + client.Namespace = i.Namespace + client.ReleaseName = i.ReleaseName + client.Timeout = createTimeout + client.Wait = true + client.WaitForJobs = true + + // install the chart here + rel, err := client.Run(chart, chart.Values) + if err != nil { + PrintPodLogs(i.KubeConfigFilePath, i.Namespace, "k8s-app=retina") + return fmt.Errorf("failed to install chart: %w", err) + } + + log.Printf("installed chart from path: %s in namespace: %s\n", rel.Name, rel.Namespace) + // this will confirm the values set during installation + log.Printf("chart values: %v\n", rel.Config) + + return nil +} + +func (i *InstallHelmChart) Prevalidate() error { + _, err := os.Stat(i.ChartPath) + + if os.IsNotExist(err) { + cwd, err := os.Getwd() + if err != nil { + return fmt.Errorf("failed to get current working directory %s: %w", cwd, err) + } + fmt.Printf("the current working directory %s", cwd) + return fmt.Errorf("directory not found at %s: working directory: %s", i.ChartPath, cwd) + } + log.Printf("found chart at %s", 
i.ChartPath) + + if os.Getenv(i.TagEnv) == "" { + return fmt.Errorf("tag is not set from env \"%s\": %w", i.TagEnv, errEmptyTag) + } + + return nil +} + +func (i *InstallHelmChart) Stop() error { + return nil +} diff --git a/test/e2e/framework/kubernetes/port-forward.go b/test/e2e/framework/kubernetes/port-forward.go new file mode 100644 index 0000000000..8e0edc285b --- /dev/null +++ b/test/e2e/framework/kubernetes/port-forward.go @@ -0,0 +1,168 @@ +// todo: matmerr, this is just going to remain broken until it can be validated with scenarios pr + +package kubernetes + +import ( + "context" + "fmt" + "log" + "net/http" + "strconv" + "time" + + retry "github.com/microsoft/retina/test/retry" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" +) + +const ( + defaultTimeoutSeconds = 300 + defaultRetryDelay = 5 * time.Second + defaultRetryAttempts = 60 + defaultHTTPClientTimeout = 2 * time.Second +) + +var ( + ErrNoPodWithLabelFound = fmt.Errorf("no pod with label found with matching pod affinity") + + defaultRetrier = retry.Retrier{Attempts: defaultRetryAttempts, Delay: defaultRetryDelay} +) + +type PortForward struct { + Namespace string + LabelSelector string + LocalPort string + RemotePort string + KubeConfigFilePath string + OptionalLabelAffinity string + + // local properties + pf *PortForwarder +} + +func (p *PortForward) Run() error { + lport, _ := strconv.Atoi(p.LocalPort) + rport, _ := strconv.Atoi(p.RemotePort) + + pctx := context.Background() + portForwardCtx, cancel := context.WithTimeout(pctx, defaultTimeoutSeconds*time.Second) + defer cancel() + + config, err := clientcmd.BuildConfigFromFlags("", p.KubeConfigFilePath) + if err != nil { + return fmt.Errorf("error building kubeconfig: %w", err) + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return fmt.Errorf("could not create clientset: %w", err) + } + + // if we have an optional label affinity, find a pod with 
that label, on the same node as a pod with the label selector + targetPodName := "" + if p.OptionalLabelAffinity != "" { + // get all pods with label + log.Printf("attempting to find pod with label \"%s\", on a node with a pod with label \"%s\"\n", p.LabelSelector, p.OptionalLabelAffinity) + targetPodName, err = p.findPodsWithAffinity(pctx, clientset) + if err != nil { + return fmt.Errorf("could not find pod with affinity: %w", err) + } + } + + portForwardFn := func() error { + // if we have a pod name (likely from affinity above), use it, otherwise use label selector + opts := PortForwardingOpts{ + Namespace: p.Namespace, + PodName: targetPodName, + LocalPort: lport, + DestPort: rport, + } + + if targetPodName != "" { + opts.PodName = targetPodName + } + + log.Printf("attempting port forward to pod name \"%s\" with label \"%s\", in namespace \"%s\"...\n", targetPodName, p.LabelSelector, p.Namespace) + + p.pf, err = NewPortForwarder(config, logger{}, opts) + if err != nil { + return fmt.Errorf("could not create port forwarder: %w", err) + } + err = p.pf.Forward(pctx) + if err != nil { + return fmt.Errorf("could not start port forward: %w", err) + } + + // verify port forward succeeded + client := http.Client{ + Timeout: defaultHTTPClientTimeout, + } + resp, err := client.Get(p.pf.Address()) //nolint + if err != nil { + log.Printf("port forward validation HTTP request to %s failed: %v\n", p.pf.Address(), err) + p.pf.Stop() + return fmt.Errorf("port forward validation HTTP request to %s failed: %w", p.pf.Address(), err) + } + defer resp.Body.Close() + + log.Printf("port forward validation HTTP request to \"%s\" succeeded, response: %s\n", p.pf.Address(), resp.Status) + + return nil + } + + if err = defaultRetrier.Do(portForwardCtx, portForwardFn); err != nil { + return fmt.Errorf("could not start port forward within %ds: %w", defaultTimeoutSeconds, err) + } + log.Printf("successfully port forwarded to \"%s\"\n", p.pf.Address()) + return nil +} + +func (p 
*PortForward) findPodsWithAffinity(ctx context.Context, clientset *kubernetes.Clientset) (string, error) { + targetPods, errAffinity := clientset.CoreV1().Pods(p.Namespace).List(ctx, metav1.ListOptions{ + LabelSelector: p.LabelSelector, + FieldSelector: "status.phase=Running", + }) + if errAffinity != nil { + return "", fmt.Errorf("could not list pods in %q with label %q: %w", p.Namespace, p.LabelSelector, errAffinity) + } + + affinityPods, errAffinity := clientset.CoreV1().Pods(p.Namespace).List(ctx, metav1.ListOptions{ + LabelSelector: p.OptionalLabelAffinity, + FieldSelector: "status.phase=Running", + }) + if errAffinity != nil { + return "", fmt.Errorf("could not list affinity pods in %q with label %q: %w", p.Namespace, p.OptionalLabelAffinity, errAffinity) + } + + // keep track of where the affinity pods are scheduled + affinityNodes := make(map[string]bool) + for i := range affinityPods.Items { + affinityNodes[affinityPods.Items[i].Spec.NodeName] = true + } + + // if a pod is found on the same node as an affinity pod, use it + for i := range targetPods.Items { + if affinityNodes[targetPods.Items[i].Spec.NodeName] { + // found a pod with the specified label, on a node with the optional label affinity + return targetPods.Items[i].Name, nil + } + } + + return "", fmt.Errorf("could not find a pod with label \"%s\", on a node that also has a pod with label \"%s\": %w", p.LabelSelector, p.OptionalLabelAffinity, ErrNoPodWithLabelFound) +} + +func (p *PortForward) Prevalidate() error { + return nil +} + +func (p *PortForward) Stop() error { + p.pf.Stop() + return nil +} + +type logger struct{} + +func (l *logger) Logf(format string, args ...interface{}) { + log.Printf(format, args...) 
+} diff --git a/test/e2e/framework/kubernetes/portforward.go b/test/e2e/framework/kubernetes/portforward.go new file mode 100644 index 0000000000..a62728d2c3 --- /dev/null +++ b/test/e2e/framework/kubernetes/portforward.go @@ -0,0 +1,196 @@ +package kubernetes + +import ( + "context" + "fmt" + "io" + "math/rand" + "net/http" + "sync" + "time" + + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/portforward" + "k8s.io/client-go/transport/spdy" +) + +// PortForwarder can manage a port forwarding session. +type PortForwarder struct { + clientset *kubernetes.Clientset + transport http.RoundTripper + upgrader spdy.Upgrader + logger logger + + opts PortForwardingOpts + + stopChan chan struct{} + errChan chan error + address string + lazyAddress sync.Once +} + +type PortForwardingOpts struct { + Namespace string + LabelSelector string + PodName string + LocalPort int + DestPort int +} + +// NewPortForwarder creates a PortForwarder. +func NewPortForwarder(restConfig *rest.Config, logger logger, opts PortForwardingOpts) (*PortForwarder, error) { + clientset, err := kubernetes.NewForConfig(restConfig) + if err != nil { + return nil, fmt.Errorf("could not create clientset: %w", err) + } + + transport, upgrader, err := spdy.RoundTripperFor(restConfig) + if err != nil { + return nil, fmt.Errorf("could not create spdy roundtripper: %w", err) + } + + return &PortForwarder{ + clientset: clientset, + transport: transport, + upgrader: upgrader, + logger: logger, + opts: opts, + stopChan: make(chan struct{}, 1), + }, nil +} + +// todo: can be made more flexible to allow a service to be specified + +// Forward attempts to initiate port forwarding a pod and port using the configured namespace and labels. +// An error is returned if a port forwarding session could not be started. 
If no error is returned, the +// Address method can be used to communicate with the pod, and the Stop and KeepAlive methods can be used +// to manage the lifetime of the port forwarding session. + +func (p *PortForwarder) Forward(ctx context.Context) error { + var podName string + if p.opts.PodName == "" { + pods, err := p.clientset.CoreV1().Pods(p.opts.Namespace).List(ctx, metav1.ListOptions{LabelSelector: p.opts.LabelSelector, FieldSelector: "status.phase=Running"}) + if err != nil { + return fmt.Errorf("could not list pods in %q with label %q: %w", p.opts.Namespace, p.opts.LabelSelector, err) + } + + if len(pods.Items) < 1 { + return fmt.Errorf("no pods found in %q with label %q", p.opts.Namespace, p.opts.LabelSelector) //nolint:goerr113 //no specific handling expected + } + + randomIndex := rand.Intn(len(pods.Items)) //nolint:gosec //this is going to be revised in the future anyways, avoid random pods + podName = pods.Items[randomIndex].Name + } else { + podName = p.opts.PodName + } + + pods, err := p.clientset.CoreV1().Pods(p.opts.Namespace).List(ctx, metav1.ListOptions{LabelSelector: p.opts.LabelSelector, FieldSelector: "status.phase=Running"}) + if err != nil { + return fmt.Errorf("could not list pods in %q with label %q: %w", p.opts.Namespace, p.opts.LabelSelector, err) + } + + if len(pods.Items) < 1 { + return fmt.Errorf("no pods found in %q with label %q", p.opts.Namespace, p.opts.LabelSelector) //nolint:goerr113 //no specific handling expected + } + + portForwardURL := p.clientset.CoreV1().RESTClient().Post(). + Resource("pods"). + Namespace(p.opts.Namespace). + Name(podName). 
+ SubResource("portforward").URL() + + readyChan := make(chan struct{}, 1) + dialer := spdy.NewDialer(p.upgrader, &http.Client{Transport: p.transport}, http.MethodPost, portForwardURL) + ports := []string{fmt.Sprintf("%d:%d", p.opts.LocalPort, p.opts.DestPort)} + pf, err := portforward.New(dialer, ports, p.stopChan, readyChan, io.Discard, io.Discard) + if err != nil { + return fmt.Errorf("could not create portforwarder: %w", err) + } + + errChan := make(chan error, 1) + go func() { + // ForwardPorts is a blocking function thus it has to be invoked in a goroutine to allow callers to do + // other things, but it can return 2 kinds of errors: initial dial errors that will be caught in the select + // block below (Ready should not fire in these cases) and later errors if the connection is dropped. + // this is why we propagate the error channel to PortForwardStreamHandle: to allow callers to handle + // cases of eventual errors. + errChan <- pf.ForwardPorts() + }() + + var portForwardPort int + select { + case <-ctx.Done(): + return fmt.Errorf("portforward cancelled: %w", ctx.Err()) + case err := <-errChan: + return fmt.Errorf("portforward failed: %w", err) + case <-pf.Ready: + prts, err := pf.GetPorts() + if err != nil { + return fmt.Errorf("get portforward port: %w", err) + } + + if len(prts) < 1 { + return errors.New("no ports forwarded") + } + + portForwardPort = int(prts[0].Local) + } + + // once successful, any subsequent port forwarding sessions from keep alive would yield the same address. + // since the address could be read at the same time as the session is renewed, it's appropriate to initialize + // lazily. + p.lazyAddress.Do(func() { + p.address = fmt.Sprintf("http://localhost:%d", portForwardPort) + }) + + p.errChan = errChan + + return nil +} + +// Address returns an address for communicating with a port-forwarded pod. +func (p *PortForwarder) Address() string { + return p.address +} + +// Stop terminates a port forwarding session. 
+func (p *PortForwarder) Stop() { + select { + case p.stopChan <- struct{}{}: + default: + } +} + +// KeepAlive can be used to restart the port forwarding session in the background. +func (p *PortForwarder) KeepAlive(ctx context.Context) { + for { + select { + case <-ctx.Done(): + p.logger.Logf("port forwarder: keep alive cancelled: %v", ctx.Err()) + return + case pfErr := <-p.errChan: + // as of client-go v0.26.1, if the connection is successful at first but then fails, + // an error is logged but only a nil error is sent to this channel. this will be fixed + // in v0.27.x, which at the time of writing has not been released. + // + // see https://github.com/kubernetes/client-go/commit/d0842249d3b92ea67c446fe273f84fe74ebaed9f + // for the relevant change. + p.logger.Logf("port forwarder: received error signal: %v. restarting session", pfErr) + p.Stop() + if err := p.Forward(ctx); err != nil { + p.logger.Logf("port forwarder: could not restart session: %v. retrying", err) + + select { + case <-ctx.Done(): + p.logger.Logf("port forwarder: keep alive cancelled: %v", ctx.Err()) + return + case <-time.After(time.Second): // todo: make configurable? 
+ continue + } + } + } + } +} diff --git a/test/e2e/framework/kubernetes/wait-pod-ready.go b/test/e2e/framework/kubernetes/wait-pod-ready.go new file mode 100644 index 0000000000..208fe140d1 --- /dev/null +++ b/test/e2e/framework/kubernetes/wait-pod-ready.go @@ -0,0 +1,62 @@ +package kubernetes + +import ( + "context" + "fmt" + "log" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" +) + +const ( + RetryTimeoutPodsReady = 5 * time.Minute + RetryIntervalPodsReady = 5 * time.Second +) + +func WaitForPodReady(ctx context.Context, clientset *kubernetes.Clientset, namespace, labelSelector string) error { + podReadyMap := make(map[string]bool) + + conditionFunc := wait.ConditionWithContextFunc(func(context.Context) (bool, error) { + var podList *corev1.PodList + podList, err := clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) + if err != nil { + return false, fmt.Errorf("error listing Pods: %w", err) + } + + if len(podList.Items) == 0 { + log.Printf("no pods found in namespace \"%s\" with label \"%s\"", namespace, labelSelector) + return false, nil + } + + // check each indviidual pod to see if it's in Running state + for i := range podList.Items { + var pod *corev1.Pod + pod, err = clientset.CoreV1().Pods(namespace).Get(ctx, podList.Items[i].Name, metav1.GetOptions{}) + if err != nil { + return false, fmt.Errorf("error getting Pod: %w", err) + } + + // Check the Pod phase + if pod.Status.Phase != corev1.PodRunning { + log.Printf("pod \"%s\" is not in Running state yet. 
Waiting...\n", pod.Name) + return false, nil + } + if !podReadyMap[pod.Name] { + log.Printf("pod \"%s\" is in Running state\n", pod.Name) + podReadyMap[pod.Name] = true + } + } + log.Printf("all pods in namespace \"%s\" with label \"%s\" are in Running state\n", namespace, labelSelector) + return true, nil + }) + + err := wait.PollUntilContextCancel(ctx, RetryIntervalPodsReady, true, conditionFunc) + if err != nil { + return fmt.Errorf("error waiting for pods in namespace \"%s\" with label \"%s\" to be in Running state: %w", namespace, labelSelector, err) + } + return nil +} diff --git a/test/e2e/framework/prometheus/prometheus.go b/test/e2e/framework/prometheus/prometheus.go new file mode 100644 index 0000000000..645ec2cd8f --- /dev/null +++ b/test/e2e/framework/prometheus/prometheus.go @@ -0,0 +1,103 @@ +package prom + +import ( + "context" + "fmt" + "io" + "log" + "net/http" + "reflect" + "time" + + "github.com/microsoft/retina/test/retry" + promclient "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" +) + +var ( + ErrNoMetricFound = fmt.Errorf("no metric found") + defaultTimeout = 300 * time.Second + defaultRetryDelay = 5 * time.Second + defaultRetryAttempts = 60 +) + +func CheckMetric(promAddress string, metricName string, validMetric map[string]string) error { + defaultRetrier := retry.Retrier{Attempts: defaultRetryAttempts, Delay: defaultRetryDelay} + + ctx := context.Background() + pctx, cancel := context.WithCancel(ctx) + defer cancel() + + metrics := map[string]*promclient.MetricFamily{} + scrapeMetricsFn := func() error { + log.Printf("checking for drop metrics on %s", promAddress) + var err error + + // obtain a full dump of all metrics on the endpoint + metrics, err = getAllPrometheusMetrics(promAddress) + if err != nil { + return fmt.Errorf("could not start port forward within %ds: %w ", defaultTimeout, err) + } + + // loop through each metric to check for a match, + // if none is found then log and return an error which 
will trigger a retry + err = verifyValidMetricPresent(metricName, metrics, validMetric) + if err != nil { + log.Printf("failed to find metric matching %s: %+v\n", metricName, validMetric) + return ErrNoMetricFound + } + + return nil + } + + err := defaultRetrier.Do(pctx, scrapeMetricsFn) + if err != nil { + return fmt.Errorf("failed to get prometheus metrics: %w", err) + } + return nil +} + +func verifyValidMetricPresent(metricName string, data map[string]*promclient.MetricFamily, validMetric map[string]string) error { + for _, metric := range data { + if metric.GetName() == metricName { + for _, metric := range metric.GetMetric() { + + // get all labels and values on the metric + metricLabels := map[string]string{} + for _, label := range metric.GetLabel() { + metricLabels[label.GetName()] = label.GetValue() + } + if reflect.DeepEqual(metricLabels, validMetric) { + return nil + } + } + } + } + + return fmt.Errorf("failed to find metric matching: %+v: %w", validMetric, ErrNoMetricFound) +} + +func getAllPrometheusMetrics(url string) (map[string]*promclient.MetricFamily, error) { + client := http.Client{} + resp, err := client.Get(url) //nolint + if err != nil { + return nil, fmt.Errorf("HTTP request failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("HTTP request failed with status: %v", resp.Status) //nolint:goerr113,gocritic + } + + metrics, err := parseReaderPrometheusMetrics(resp.Body) + if err != nil { + return nil, err + } + + return metrics, nil +} + +func parseReaderPrometheusMetrics(input io.Reader) (map[string]*promclient.MetricFamily, error) { + var parser expfmt.TextParser + return parser.TextToMetricFamilies(input) //nolint +} diff --git a/test/e2e/framework/types/background_test.go b/test/e2e/framework/types/background_test.go new file mode 100644 index 0000000000..b564d5d298 --- /dev/null +++ b/test/e2e/framework/types/background_test.go @@ -0,0 +1,96 @@ +package types + +import ( + 
"fmt" + "log" + "sync" + "testing" + "time" +) + +func TestFramework(t *testing.T) { + job := NewJob("Validate that drop metrics are present in the prometheus endpoint") + runner := NewRunner(t, job) + defer runner.Run() + + job.AddStep(&TestBackground{ + CounterName: "Example Counter", + }, &StepOptions{ + ExpectError: false, + RunInBackgroundWithID: "TestStep", + }) + + job.AddStep(&Sleep{ + Duration: 1 * time.Second, + }, nil) + + job.AddStep(&Stop{ + BackgroundID: "TestStep", + }, nil) +} + +type TestBackground struct { + CounterName string + c *counter +} + +func (t *TestBackground) Run() error { + t.c = newCounter() + err := t.c.Start() + if err != nil { + return fmt.Errorf("failed to start counter: %w", err) + } + log.Println("running counter: " + t.CounterName) + return nil +} + +func (t *TestBackground) Stop() error { + log.Println("stopping counter: " + t.CounterName) + err := t.c.Stop() + if err != nil { + return fmt.Errorf("failed to stop counter: %w", err) + } + log.Println("count:", t.c.count) + return nil +} + +func (t *TestBackground) Prevalidate() error { + return nil +} + +type counter struct { + ticker *time.Ticker + count int + ch chan struct{} + wg sync.WaitGroup +} + +func newCounter() *counter { + return &counter{ + ch: make(chan struct{}), + } +} + +func (c *counter) Start() error { + c.ticker = time.NewTicker(1 * time.Millisecond) + c.wg.Add(1) + go func() { + for { + select { + case <-c.ticker.C: + c.count++ + case <-c.ch: + c.wg.Done() + return + } + } + }() + + return nil +} + +func (c *counter) Stop() error { + close(c.ch) + c.wg.Wait() + return nil +} diff --git a/test/e2e/framework/types/job.go b/test/e2e/framework/types/job.go new file mode 100644 index 0000000000..aefb96fae8 --- /dev/null +++ b/test/e2e/framework/types/job.go @@ -0,0 +1,320 @@ +package types + +import ( + "errors" + "fmt" + "log" + "reflect" +) + +var ( + ErrEmptyDescription = fmt.Errorf("job description is empty") + ErrNonNilError = fmt.Errorf("expected error to be 
non-nil") + ErrNilError = fmt.Errorf("expected error to be nil") + ErrMissingParameter = fmt.Errorf("missing parameter") + ErrParameterAlreadySet = fmt.Errorf("parameter already set") + ErrOrphanSteps = fmt.Errorf("background steps with no corresponding stop") + ErrCannotStopStep = fmt.Errorf("cannot stop step") + ErrMissingBackroundID = fmt.Errorf("missing background id") + ErrNoValue = fmt.Errorf("empty parameter not found saved in values") + ErrEmptyScenarioName = fmt.Errorf("scenario name is empty") +) + +// A Job is a logical grouping of steps, options and values +type Job struct { + values *JobValues + Description string + Steps []*StepWrapper + BackgroundSteps map[string]*StepWrapper + Scenarios map[*StepWrapper]*Scenario +} + +// A StepWrapper is a coupling of a step and it's options +type StepWrapper struct { + Step Step + Opts *StepOptions +} + +// A Scenario is a logical grouping of steps, used to describe a scenario such as "test drop metrics" +// which will require port forwarding, exec'ing, scraping, etc. 
+type Scenario struct { + name string + steps []*StepWrapper + values *JobValues +} + +func NewScenario(name string, steps ...*StepWrapper) *Scenario { + if name == "" { + log.Printf("scenario name is empty") + } + + return &Scenario{ + name: name, + steps: steps, + values: &JobValues{kv: make(map[string]string)}, + } +} + +func (j *Job) GetPrettyStepName(step *StepWrapper) string { + prettyname := reflect.TypeOf(step.Step).Elem().Name() + if j.Scenarios[step] != nil { + prettyname = fmt.Sprintf("%s (scenario: %s)", prettyname, j.Scenarios[step].name) + } + return prettyname +} + +func (j *Job) responseDivider(wrapper *StepWrapper) { + totalWidth := 125 + start := 20 + i := 0 + for ; i < start; i++ { + fmt.Print("#") + } + + mid := fmt.Sprintf(" %s ", j.GetPrettyStepName(wrapper)) + + fmt.Print(mid) + for ; i < totalWidth-(start+len(mid)); i++ { + fmt.Print("#") + } + fmt.Println() +} + +func NewJob(description string) *Job { + return &Job{ + values: &JobValues{ + kv: make(map[string]string), + }, + BackgroundSteps: make(map[string]*StepWrapper), + Scenarios: make(map[*StepWrapper]*Scenario), + Description: description, + } +} + +func (j *Job) AddScenario(scenario *Scenario) { + for i, step := range scenario.steps { + j.Steps = append(j.Steps, step) + j.Scenarios[scenario.steps[i]] = scenario + } +} + +func (j *Job) AddStep(step Step, opts *StepOptions) { + stepw := &StepWrapper{ + Step: step, + Opts: opts, + } + j.Steps = append(j.Steps, stepw) +} + +func (j *Job) GetValue(stepw *StepWrapper, key string) (string, bool) { + // if step exists in a scenario, use the scenario's values + // if the value isn't in the scenario's values, get the root job's value + if scenario, exists := j.Scenarios[stepw]; exists { + if scenario.values.Contains(key) { + return scenario.values.Get(key), true + } + } + if j.values.Contains(key) { + return j.values.Get(key), true + } + + return "", false +} + +// SetGetValues is used when we want to save parameters to job, and also check if 
+// the parameter exists in the scenario's or top level values +func (j *Job) SetGetValues(stepw *StepWrapper, key, value string) (string, error) { + // if top level step parameter is set, and scenario step is not, inherit + // if top level step parameter is not set, and scenario step is, use scenario step + // if top level step parameter is set, and scenario step is set, warn and use scenario step + + // check if scenario exists, if it does, check if the value is in the scenario's values + if scenario, exists := j.Scenarios[stepw]; exists { + scenarioValue, err := scenario.values.SetGet(key, value) + if err != nil && !errors.Is(err, ErrEmptyValue) { + return "", err + } + if scenarioValue != "" { + return scenarioValue, nil + } + } + + return j.values.SetGet(key, value) +} + +// GetValues is used when we want to skip saving parameters to job, but also check if +// the parameter exists in the scenario's or top level values +func (j *Job) GetValues(stepw *StepWrapper, key string) string { + // check if scenario exists, if it does, check if the value is in the scenario's values + if scenario, exists := j.Scenarios[stepw]; exists { + scenarioValue := scenario.values.Get(key) + if scenarioValue != "" { + return scenarioValue + } + } + + return j.values.Get(key) +} + +func (j *Job) Run() error { + if j.Description == "" { + return ErrEmptyDescription + } + + // validate all steps in the job, making sure parameters are set/validated etc. 
+ err := j.Validate() + if err != nil { + return err // nolint:wrapcheck // don't wrap error, wouldn't provide any more context than the error itself + } + + for _, wrapper := range j.Steps { + err := wrapper.Step.Prevalidate() + if err != nil { + return err //nolint:wrapcheck // don't wrap error, wouldn't provide any more context than the error itself + } + } + + for _, wrapper := range j.Steps { + j.responseDivider(wrapper) + err := wrapper.Step.Run() + if wrapper.Opts.ExpectError && err == nil { + return fmt.Errorf("expected error from step %s but got nil: %w", reflect.TypeOf(wrapper.Step).Elem().Name(), ErrNilError) + } else if !wrapper.Opts.ExpectError && err != nil { + return fmt.Errorf("did not expect error from step %s but got error: %w", reflect.TypeOf(wrapper.Step).Elem().Name(), err) + } + } + + return nil +} + +func (j *Job) Validate() error { + // ensure that there are no background steps left after running + + for _, wrapper := range j.Steps { + err := j.validateStep(wrapper) + if err != nil { + return err + } + + } + + err := j.validateBackgroundSteps() + if err != nil { + return err + } + + return nil +} + +func (j *Job) validateBackgroundSteps() error { + stoppedBackgroundSteps := make(map[string]bool) + + for _, stepw := range j.Steps { + switch s := stepw.Step.(type) { + case *Stop: + if s.BackgroundID == "" { + return fmt.Errorf("cannot stop step with empty background id; %w", ErrMissingBackroundID) + } + + if j.BackgroundSteps[s.BackgroundID] == nil { + return fmt.Errorf("cannot stop step \"%s\", as it won't be started by this time; %w", s.BackgroundID, ErrCannotStopStep) + } + if stopped := stoppedBackgroundSteps[s.BackgroundID]; stopped { + return fmt.Errorf("cannot stop step \"%s\", as it has already been stopped; %w", s.BackgroundID, ErrCannotStopStep) + } + + // track for later on if the stop step is called + stoppedBackgroundSteps[s.BackgroundID] = true + + // set the stop step within the step + s.Step = 
j.BackgroundSteps[s.BackgroundID].Step + + default: + if stepw.Opts.RunInBackgroundWithID != "" { + if _, exists := j.BackgroundSteps[stepw.Opts.RunInBackgroundWithID]; exists { + log.Fatalf("step with id \"%s\" already exists", stepw.Opts.RunInBackgroundWithID) + } + j.BackgroundSteps[stepw.Opts.RunInBackgroundWithID] = stepw + stoppedBackgroundSteps[stepw.Opts.RunInBackgroundWithID] = false + } + } + } + + for stepName, stopped := range stoppedBackgroundSteps { + if !stopped { + return fmt.Errorf("step \"%s\" was not stopped; %w", stepName, ErrOrphanSteps) + } + } + + return nil +} + +func (j *Job) validateStep(step *StepWrapper) error { + val := reflect.ValueOf(step.Step).Elem() + + // set default options if none are provided + if step.Opts == nil { + step.Opts = &DefaultOpts + } + + switch step.Step.(type) { + case *Stop: + // don't validate stop steps + return nil + + case *Sleep: + // don't validate sleep steps + return nil + + default: + for i, f := range reflect.VisibleFields(val.Type()) { + + // skip saving unexported fields + if !f.IsExported() { + continue + } + + k := reflect.Indirect(val.Field(i)).Kind() + + if k == reflect.String { + parameter := val.Type().Field(i).Name + passedvalue := val.Field(i).Interface().(string) + + // if top level step parameter is set, and scenario step is not, inherit + // if top level step parameter is not set, and scenario step is, use scenario step + // if top level step parameter is set, and scenario step is set, warn and use scenario step + + var err error + var value string + if step.Opts.SkipSavingParamatersToJob { + retrievedvalue := j.GetValues(step, parameter) + + // if the value is already set, and it's not the same as the one we're trying to set, error + if retrievedvalue != "" && passedvalue != "" && retrievedvalue != passedvalue { + return fmt.Errorf("parameter \"%s\" was set as \"%s\", but was already saved as \"%s\"; %w", parameter, retrievedvalue, passedvalue, ErrParameterAlreadySet) + } + + if passedvalue 
== "" { + if retrievedvalue == "" { + return fmt.Errorf("parameter \"%s\" is empty in step \"%s\"; %w", parameter, j.GetPrettyStepName(step), ErrNoValue) + } + value = retrievedvalue + } else { + value = passedvalue + } + + } else { + value, err = j.SetGetValues(step, parameter, passedvalue) + if err != nil { + return fmt.Errorf("error setting parameter \"%s\": in step \"%s\": %w", parameter, j.GetPrettyStepName(step), err) + } + } + + // don't use log format since this is technically preexecution and easier to read + fmt.Printf("%s setting stored value for parameter [%s] set as [%s]\n", j.GetPrettyStepName(step), parameter, value) + val.Field(i).SetString(value) + } + } + } + return nil +} diff --git a/test/e2e/framework/types/jobvalues.go b/test/e2e/framework/types/jobvalues.go new file mode 100644 index 0000000000..4769c9e477 --- /dev/null +++ b/test/e2e/framework/types/jobvalues.go @@ -0,0 +1,54 @@ +package types + +import ( + "fmt" + "sync" +) + +var ( + ErrValueAlreadySet = fmt.Errorf("parameter already set in values") + ErrEmptyValue = fmt.Errorf("empty parameter not found in values") +) + +type JobValues struct { + RWLock sync.RWMutex + kv map[string]string +} + +func (j *JobValues) New() *JobValues { + return &JobValues{ + kv: make(map[string]string), + } +} + +func (j *JobValues) Contains(key string) bool { + j.RWLock.RLock() + defer j.RWLock.RUnlock() + _, ok := j.kv[key] + return ok +} + +func (j *JobValues) Get(key string) string { + j.RWLock.RLock() + defer j.RWLock.RUnlock() + return j.kv[key] +} + +func (j *JobValues) SetGet(key, value string) (string, error) { + j.RWLock.Lock() + defer j.RWLock.Unlock() + + _, ok := j.kv[key] + + switch { + case !ok && value != "": + j.kv[key] = value + return value, nil + case ok && value == "": + return j.kv[key], nil + case ok && value != "": + return "", ErrValueAlreadySet + } + + return "", ErrEmptyValue +} diff --git a/test/e2e/framework/types/runner.go b/test/e2e/framework/types/runner.go new file mode 
100644 index 0000000000..835fe05f32 --- /dev/null +++ b/test/e2e/framework/types/runner.go @@ -0,0 +1,28 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +// A wrapper around a job, so that internal job components don't require things like *testing.T +// and can be reused elsewhere +type Runner struct { + t *testing.T + Job *Job +} + +func NewRunner(t *testing.T, job *Job) *Runner { + return &Runner{ + t: t, + Job: job, + } +} + +func (r *Runner) Run() { + if r.t.Failed() { + return + } + require.NoError(r.t, r.Job.Run()) +} diff --git a/test/e2e/framework/types/scenarios_test.go b/test/e2e/framework/types/scenarios_test.go new file mode 100644 index 0000000000..f6d0768e8e --- /dev/null +++ b/test/e2e/framework/types/scenarios_test.go @@ -0,0 +1,126 @@ +package types + +import ( + "fmt" + "testing" +) + +// Test against a BYO cluster with Cilium and Hubble enabled, +// create a pod with a deny all network policy and validate +// that the drop metrics are present in the prometheus endpoint +func TestScenarioValues(t *testing.T) { + job := NewJob("Validate that drop metrics are present in the prometheus endpoint") + runner := NewRunner(t, job) + defer runner.Run() + + // Add top level step + job.AddStep(&DummyStep{ + Parameter1: "Top Level Step 1", + Parameter2: "Top Level Step 2", + }, nil) + + // Add scenario to ensure that the parameters are set correctly + // and inherited without overriding + job.AddScenario(NewDummyScenario()) + + job.AddStep(&DummyStep{}, nil) +} + +// Test against a BYO cluster with Cilium and Hubble enabled, +// create a pod with a deny all network policy and validate +// that the drop metrics are present in the prometheus endpoint +func TestScenarioValuesWithSkip(t *testing.T) { + job := NewJob("Validate that drop metrics are present in the prometheus endpoint") + runner := NewRunner(t, job) + defer runner.Run() + + // Add top level step + job.AddStep(&DummyStep{ + Parameter1: "Top Level Step 1", + 
Parameter2: "Top Level Step 2", + }, &StepOptions{ + SkipSavingParamatersToJob: true, + }) + + // top level step skips saving parameters, so we should error here + // that parameters are missing + job.AddScenario(NewDummyScenario()) + + job.AddStep(&DummyStep{ + Parameter1: "Other Level Step 1", + Parameter2: "Other Level Step 2", + }, nil) +} + +func TestScenarioValuesWithScenarioSkip(t *testing.T) { + job := NewJob("Validate that drop metrics are present in the prometheus endpoint") + runner := NewRunner(t, job) + defer runner.Run() + + // Add top level step + job.AddStep(&DummyStep{ + Parameter1: "Kubeconfig path 1", + Parameter2: "Kubeconfig path 2", + }, nil) + + // top level step skips saving parameters, so we should error here + // that parameters are missing + job.AddScenario(NewDummyScenarioWithSkipSave()) + + // Add top level step + job.AddStep(&DummyStep{}, nil) +} + +func NewDummyScenario() *Scenario { + return NewScenario("Dummy Scenario", + &StepWrapper{ + Step: &DummyStep{ + Parameter1: "Something in Scenario 1", + Parameter2: "Something in Scenario 1", + }, + }, + ) +} + +func NewDummyScenario2() *Scenario { + return NewScenario("Dummy Scenario", + &StepWrapper{ + Step: &DummyStep{ + Parameter1: "Something 2 in Scenario 1", + Parameter2: "Something 2 in Scenario 1", + }, + }, + ) +} + +func NewDummyScenarioWithSkipSave() *Scenario { + return NewScenario("Dummy Scenario", + &StepWrapper{ + Step: &DummyStep{ + Parameter1: "", + Parameter2: "", + }, Opts: &StepOptions{ + SkipSavingParamatersToJob: true, + }, + }, + ) +} + +type DummyStep struct { + Parameter1 string + Parameter2 string +} + +func (d *DummyStep) Run() error { + fmt.Printf("Running DummyStep with parameter 1 as: %s\n", d.Parameter1) + fmt.Printf("Running DummyStep with parameter 2 as: %s\n", d.Parameter2) + return nil +} + +func (d *DummyStep) Stop() error { + return nil +} + +func (d *DummyStep) Prevalidate() error { + return nil +} diff --git a/test/e2e/framework/types/step.go 
b/test/e2e/framework/types/step.go new file mode 100644 index 0000000000..dd14f2bcd1 --- /dev/null +++ b/test/e2e/framework/types/step.go @@ -0,0 +1,41 @@ +package types + +var DefaultOpts = StepOptions{ + // when wanting to expect an error, set to true + ExpectError: false, + + // when wanting to avoid saving the parameters to the job, + // such as a repetetive task where step is used multiple times sequentially, + // but parameters are different each time + SkipSavingParamatersToJob: false, +} + +type Step interface { + // Useful when wanting to do parameter checking, for example + // if a parameter length is known to be required less than 80 characters, + // do this here so we don't find out later on when we run the step + // when possible, try to avoid making external calls, this should be fast and simple + Prevalidate() error + + // Primary step where test logic is executed + // Returning an error will cause the test to fail + Run() error + + // Require for background steps + Stop() error +} + +type StepOptions struct { + ExpectError bool + + // Generally set this to false when you want to reuse + // a step, but you don't want to save the parameters + // ex: Sleep for 15 seconds, then Sleep for 10 seconds, + // you don't want to save the parameters + SkipSavingParamatersToJob bool + + // Will save this step to the job's steps + // and then later on when Stop is called with job name, + // it will call Stop() on the step + RunInBackgroundWithID string +} diff --git a/test/e2e/framework/types/step_sleep.go b/test/e2e/framework/types/step_sleep.go new file mode 100644 index 0000000000..2c89d1deb9 --- /dev/null +++ b/test/e2e/framework/types/step_sleep.go @@ -0,0 +1,24 @@ +package types + +import ( + "log" + "time" +) + +type Sleep struct { + Duration time.Duration +} + +func (c *Sleep) Run() error { + log.Printf("sleeping for %s...\n", c.Duration.String()) + time.Sleep(c.Duration) + return nil +} + +func (c *Sleep) Stop() error { + return nil +} + +func (c *Sleep) 
Prevalidate() error { + return nil +} diff --git a/test/e2e/framework/types/step_stop.go b/test/e2e/framework/types/step_stop.go new file mode 100644 index 0000000000..226fd8b2f5 --- /dev/null +++ b/test/e2e/framework/types/step_stop.go @@ -0,0 +1,30 @@ +package types + +import ( + "fmt" + "log" + "reflect" +) + +type Stop struct { + BackgroundID string + Step Step +} + +func (c *Stop) Run() error { + stepName := reflect.TypeOf(c.Step).Elem().Name() + log.Println("stopping step:", stepName) + err := c.Step.Stop() + if err != nil { + return fmt.Errorf("failed to stop step: %s with err %w", stepName, err) + } + return nil +} + +func (c *Stop) Stop() error { + return nil +} + +func (c *Stop) Prevalidate() error { + return nil +} diff --git a/test/e2e/scenarios/retina/drop/scenario.go b/test/e2e/scenarios/retina/drop/scenario.go new file mode 100644 index 0000000000..68340b91d9 --- /dev/null +++ b/test/e2e/scenarios/retina/drop/scenario.go @@ -0,0 +1,107 @@ +package drop + +import ( + "time" + + "github.com/microsoft/retina/test/e2e/framework/kubernetes" + "github.com/microsoft/retina/test/e2e/framework/types" +) + +const ( + sleepDelay = 5 * time.Second + TCP = "TCP" + UDP = "UDP" + + IPTableRuleDrop = "IPTABLE_RULE_DROP" +) + +func ValidateDropMetric() *types.Scenario { + name := "Drop Metrics" + steps := []*types.StepWrapper{ + { + Step: &kubernetes.CreateDenyAllNetworkPolicy{ + NetworkPolicyNamespace: "kube-system", + DenyAllLabelSelector: "app=agnhost-a", + }, + }, + { + Step: &kubernetes.CreateAgnhostStatefulSet{ + AgnhostName: "agnhost-a", + AgnhostNamespace: "kube-system", + }, + }, + { + Step: &kubernetes.ExecInPod{ + PodName: "agnhost-a-0", + PodNamespace: "kube-system", + Command: "curl -s -m 5 bing.com", + }, + Opts: &types.StepOptions{ + ExpectError: true, + SkipSavingParamatersToJob: true, + }, + }, + { + Step: &types.Sleep{ + Duration: sleepDelay, + }, + }, + { + Step: &kubernetes.ExecInPod{ + PodName: "agnhost-a-0", + PodNamespace: "kube-system", + 
Command: "curl -s -m 5 bing.com", + }, + Opts: &types.StepOptions{ + ExpectError: true, + SkipSavingParamatersToJob: true, + }, + }, + { + Step: &kubernetes.PortForward{ + Namespace: "kube-system", + LabelSelector: "k8s-app=retina", + LocalPort: "10093", + RemotePort: "10093", + OptionalLabelAffinity: "app=agnhost-a", // port forward to a pod on a node that also has this pod with this label, assuming same namespace + }, + Opts: &types.StepOptions{ + RunInBackgroundWithID: "drop-port-forward", + }, + }, + { + Step: &ValidateRetinaDropMetric{ + PortForwardedRetinaPort: "10093", + Source: "agnhost-a", + Reason: IPTableRuleDrop, + Direction: "unknown", + Protocol: UDP, + }, + }, + { + Step: &types.Stop{ + BackgroundID: "drop-port-forward", + }, + }, + + { + Step: &kubernetes.DeleteKubernetesResource{ + ResourceType: kubernetes.TypeString(kubernetes.NetworkPolicy), + ResourceName: "deny-all", + ResourceNamespace: "kube-system", + }, Opts: &types.StepOptions{ + SkipSavingParamatersToJob: true, + }, + }, + { + Step: &kubernetes.DeleteKubernetesResource{ + ResourceType: kubernetes.TypeString(kubernetes.StatefulSet), + ResourceName: "agnhost-a", + ResourceNamespace: "kube-system", + }, Opts: &types.StepOptions{ + SkipSavingParamatersToJob: true, + }, + }, + } + return types.NewScenario(name, steps...) 
+} diff --git a/test/e2e/scenarios/retina/drop/validate-drop-metric.go b/test/e2e/scenarios/retina/drop/validate-drop-metric.go new file mode 100644 index 0000000000..7b647e65f8 --- /dev/null +++ b/test/e2e/scenarios/retina/drop/validate-drop-metric.go @@ -0,0 +1,58 @@ +package drop + +import ( + "fmt" + "log" + + prom "github.com/microsoft/retina/test/e2e/framework/prometheus" +) + +var ( + dropCountMetricName = "networkobservability_drop_count" + dropBytesMetricName = "networkobservability_drop_bytes" +) + +const ( + destinationKey = "destination" + sourceKey = "source" + protcolKey = "protocol" + reasonKey = "reason" + directionKey = "direction" +) + +type ValidateRetinaDropMetric struct { + PortForwardedRetinaPort string + Source string + Protocol string + Reason string + Direction string +} + +func (v *ValidateRetinaDropMetric) Run() error { + promAddress := fmt.Sprintf("http://localhost:%s/metrics", v.PortForwardedRetinaPort) + + metric := map[string]string{ + directionKey: v.Direction, reasonKey: IPTableRuleDrop, + } + + err := prom.CheckMetric(promAddress, dropCountMetricName, metric) + if err != nil { + return fmt.Errorf("failed to verify prometheus metrics %s: %w", dropCountMetricName, err) + } + + err = prom.CheckMetric(promAddress, dropBytesMetricName, metric) + if err != nil { + return fmt.Errorf("failed to verify prometheus metrics %s: %w", dropBytesMetricName, err) + } + + log.Printf("found metrics matching %+v\n", metric) + return nil +} + +func (v *ValidateRetinaDropMetric) Prevalidate() error { + return nil +} + +func (v *ValidateRetinaDropMetric) Stop() error { + return nil +} diff --git a/test/e2e/scenarios/retina/retina_scenarios_test.go b/test/e2e/scenarios/retina/retina_scenarios_test.go new file mode 100644 index 0000000000..02595d129c --- /dev/null +++ b/test/e2e/scenarios/retina/retina_scenarios_test.go @@ -0,0 +1,81 @@ +package retina + +import ( + "os" + "os/user" + "strconv" + "testing" + "time" + + 
"github.com/microsoft/retina/test/e2e/framework/azure" + "github.com/microsoft/retina/test/e2e/framework/generic" + "github.com/microsoft/retina/test/e2e/framework/kubernetes" + "github.com/microsoft/retina/test/e2e/framework/types" + "github.com/microsoft/retina/test/e2e/scenarios/retina/drop" + tcp "github.com/microsoft/retina/test/e2e/scenarios/retina/tcp" +) + +const ( + // netObsRGtag is used to tag resources created by this test suite + netObsRGtag = "-e2e-netobs-" +) + +// Test against AKS cluster with NPM enabled, +// create a pod with a deny all network policy and validate +// that the drop metrics are present in the prometheus endpoint +func TestE2ERetinaMetrics(t *testing.T) { + job := types.NewJob("Validate that drop metrics are present in the prometheus endpoint") + runner := types.NewRunner(t, job) + defer runner.Run() + + curuser, _ := user.Current() + + testName := curuser.Username + netObsRGtag + strconv.FormatInt(time.Now().Unix(), 10) + sub := os.Getenv("AZURE_SUBSCRIPTION_ID") + + job.AddStep(&azure.CreateResourceGroup{ + SubscriptionID: sub, + ResourceGroupName: testName, + Location: "eastus", + }, nil) + + job.AddStep(&azure.CreateVNet{ + VnetName: "testvnet", + VnetAddressSpace: "10.0.0.0/9", + }, nil) + + job.AddStep(&azure.CreateSubnet{ + SubnetName: "testsubnet", + SubnetAddressSpace: "10.0.0.0/12", + }, nil) + + job.AddStep(&azure.CreateNPMCluster{ + ClusterName: testName, + PodCidr: "10.128.0.0/9", + DNSServiceIP: "192.168.0.10", + ServiceCidr: "192.168.0.0/28", + }, nil) + + job.AddStep(&azure.GetAKSKubeConfig{ + KubeConfigFilePath: "./test.pem", + }, nil) + + // todo: after loading tag, consume it in subsequent steps + job.AddStep(&generic.LoadTag{ + TagEnv: generic.DefaultTagEnv, + }, nil) + + // todo: enable mutating images in helm chart + job.AddStep(&kubernetes.InstallHelmChart{ + Namespace: "kube-system", + ReleaseName: "retina", + ChartPath: "../../../../deploy/manifests/controller/helm/retina/", + }, nil) + + 
job.AddScenario(drop.ValidateDropMetric()) + + // todo: handle multiple scenarios back to back + job.AddScenario(tcp.ValidateTCPMetrics()) + + job.AddStep(&azure.DeleteResourceGroup{}, nil) +} diff --git a/test/e2e/scenarios/retina/tcp/scenario.go b/test/e2e/scenarios/retina/tcp/scenario.go new file mode 100644 index 0000000000..5913fed027 --- /dev/null +++ b/test/e2e/scenarios/retina/tcp/scenario.go @@ -0,0 +1,91 @@ +package flow + +import ( + "time" + + k8s "github.com/microsoft/retina/test/e2e/framework/kubernetes" + "github.com/microsoft/retina/test/e2e/framework/types" +) + +const ( + sleepDelay = 5 * time.Second + TCP = "TCP" + UDP = "UDP" + + IPTableRuleDrop = "IPTABLE_RULE_DROP" +) + +func ValidateTCPMetrics() *types.Scenario { + Name := "Flow Metrics" + Steps := []*types.StepWrapper{ + { + Step: &k8s.CreateKapingerDeployment{ + KapingerNamespace: "kube-system", + KapingerReplicas: "1", + }, + }, + { + Step: &k8s.CreateAgnhostStatefulSet{ + AgnhostName: "agnhost-a", + AgnhostNamespace: "kube-system", + }, + }, + { + Step: &k8s.ExecInPod{ + PodName: "agnhost-a-0", + PodNamespace: "kube-system", + Command: "curl -s -m 5 bing.com", + }, Opts: &types.StepOptions{ + SkipSavingParamatersToJob: true, + }, + }, + { + Step: &types.Sleep{ + Duration: sleepDelay, + }, + }, + { + Step: &k8s.ExecInPod{ + PodName: "agnhost-a-0", + PodNamespace: "kube-system", + Command: "curl -s -m 5 bing.com", + }, Opts: &types.StepOptions{ + SkipSavingParamatersToJob: true, + }, + }, + { + Step: &k8s.PortForward{ + LabelSelector: "k8s-app=retina", + Namespace: "kube-system", + LocalPort: "10093", + RemotePort: "10093", + OptionalLabelAffinity: "app=agnhost-a", // port forward to a pod on a node that also has this pod with this label, assuming same namespace + }, + Opts: &types.StepOptions{ + SkipSavingParamatersToJob: true, + RunInBackgroundWithID: "drop-flow-forward", + }, + }, + { + Step: &ValidateRetinaTCPStateMetric{ + PortForwardedRetinaPort: "10093", + }, Opts: 
&types.StepOptions{ + SkipSavingParamatersToJob: true, + }, + }, + { + Step: &ValidateRetinaTCPConnectionRemoteMetric{ + PortForwardedRetinaPort: "10093", + }, Opts: &types.StepOptions{ + SkipSavingParamatersToJob: true, + }, + }, + { + Step: &types.Stop{ + BackgroundID: "drop-flow-forward", + }, + }, + } + + return types.NewScenario(Name, Steps...) +} diff --git a/test/e2e/scenarios/retina/tcp/validate-flow-metric.go b/test/e2e/scenarios/retina/tcp/validate-flow-metric.go new file mode 100644 index 0000000000..4af54b766b --- /dev/null +++ b/test/e2e/scenarios/retina/tcp/validate-flow-metric.go @@ -0,0 +1,50 @@ +package flow + +import ( + "fmt" + "log" + + prom "github.com/microsoft/retina/test/e2e/framework/prometheus" +) + +var tcpStateMetricName = "networkobservability_tcp_state" + +const ( + state = "state" + + established = "ESTABLISHED" + listen = "LISTEN" + time_wait = "TIME_WAIT" +) + +type ValidateRetinaTCPStateMetric struct { + PortForwardedRetinaPort string +} + +func (v *ValidateRetinaTCPStateMetric) Run() error { + promAddress := fmt.Sprintf("http://localhost:%s/metrics", v.PortForwardedRetinaPort) + + validMetrics := []map[string]string{ + {state: established}, + {state: listen}, + {state: time_wait}, + } + + for _, metric := range validMetrics { + err := prom.CheckMetric(promAddress, tcpStateMetricName, metric) + if err != nil { + return fmt.Errorf("failed to verify prometheus metrics: %w", err) + } + } + + log.Printf("found metrics matching %+v\n", validMetrics) + return nil +} + +func (v *ValidateRetinaTCPStateMetric) Prevalidate() error { + return nil +} + +func (v *ValidateRetinaTCPStateMetric) Stop() error { + return nil +} diff --git a/test/e2e/scenarios/retina/tcp/validate-tcp-connection-remote.go b/test/e2e/scenarios/retina/tcp/validate-tcp-connection-remote.go new file mode 100644 index 0000000000..4121d1b5d6 --- /dev/null +++ b/test/e2e/scenarios/retina/tcp/validate-tcp-connection-remote.go @@ -0,0 +1,45 @@ +package flow + +import ( + "fmt" 
+ "log" + + prom "github.com/microsoft/retina/test/e2e/framework/prometheus" +) + +var tcpConnectionRemoteMetricName = "networkobservability_tcp_connection_remote" + +const ( + address = "address" + port = "port" +) + +type ValidateRetinaTCPConnectionRemoteMetric struct { + PortForwardedRetinaPort string +} + +func (v *ValidateRetinaTCPConnectionRemoteMetric) Run() error { + promAddress := fmt.Sprintf("http://localhost:%s/metrics", v.PortForwardedRetinaPort) + + validMetrics := []map[string]string{ + {address: "0.0.0.0", port: "0"}, + } + + for _, metric := range validMetrics { + err := prom.CheckMetric(promAddress, tcpConnectionRemoteMetricName, metric) + if err != nil { + return fmt.Errorf("failed to verify prometheus metrics: %w", err) + } + } + + log.Printf("found metrics matching %+v\n", validMetrics) + return nil +} + +func (v *ValidateRetinaTCPConnectionRemoteMetric) Prevalidate() error { + return nil +} + +func (v *ValidateRetinaTCPConnectionRemoteMetric) Stop() error { + return nil +} diff --git a/test/retry/retry.go b/test/retry/retry.go new file mode 100644 index 0000000000..7b46f9266e --- /dev/null +++ b/test/retry/retry.go @@ -0,0 +1,39 @@ +// todo: there are more robust retry packages out there, discuss with team +package retry + +import ( + "context" + "time" +) + +// a Retrier can attempt an operation multiple times, based on some thresholds +type Retrier struct { + Attempts int + Delay time.Duration + ExpBackoff bool +} + +func (r Retrier) Do(ctx context.Context, f func() error) error { + done := make(chan struct{}) + var err error + go func() { + defer func() { done <- struct{}{} }() + for i := 0; i < r.Attempts; i++ { + err = f() + if err == nil { + break + } + time.Sleep(r.Delay) + if r.ExpBackoff { + r.Delay *= 2 + } + } + }() + + select { + case <-ctx.Done(): + return ctx.Err() + case <-done: + return err + } +} From 7b111a0f868706e119b0bc09c8ac0284b56d1368 Mon Sep 17 00:00:00 2001 From: Mathew Merrick Date: Tue, 19 Mar 2024 20:25:47 +0000 
Subject: [PATCH 2/2] lints --- .gitattributes | 2 ++ .gitignore | 3 +++ go.mod | 2 +- go.sum | 4 ++-- .../azure/create-cluster-with-npm.go | 21 ++++++++++++------- test/e2e/framework/generic/load-tag.go | 5 +++-- test/e2e/framework/kubernetes/get-logs.go | 8 ++++--- .../kubernetes/install-retina-helm.go | 9 +++++--- test/e2e/framework/prometheus/prometheus.go | 2 +- .../scenarios/retina/retina_scenarios_test.go | 6 +++--- .../retina/tcp/validate-flow-metric.go | 4 ++-- test/retry/retry.go | 7 ++++++- 12 files changed, 47 insertions(+), 26 deletions(-) diff --git a/.gitattributes b/.gitattributes index 6933ce63f0..f3103c7ac9 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1 +1,3 @@ pkg/plugin/lib/** linguist-vendored=true + +*.go text eol=lf diff --git a/.gitignore b/.gitignore index d9cfd93bd7..6334d88b1b 100644 --- a/.gitignore +++ b/.gitignore @@ -26,3 +26,6 @@ site/.docusaurus/ site/node_modules/ hack/tools/bin + +#vscode +.vscode/ diff --git a/go.mod b/go.mod index 201128663e..70e79f5cb4 100644 --- a/go.mod +++ b/go.mod @@ -212,7 +212,7 @@ require ( k8s.io/apiserver v0.29.3 // indirect k8s.io/component-base v0.29.3 // indirect k8s.io/cri-api v0.29.2 // indirect - oras.land/oras-go v1.2.4 // indirect + oras.land/oras-go v1.2.5 // indirect sigs.k8s.io/kustomize/api v0.14.0 // indirect sigs.k8s.io/kustomize/kyaml v0.14.3 // indirect ) diff --git a/go.sum b/go.sum index c19a309b5e..ac454b91f1 100644 --- a/go.sum +++ b/go.sum @@ -1121,8 +1121,8 @@ k8s.io/kubectl v0.29.3 h1:RuwyyIU42MAISRIePaa8Q7A3U74Q9P4MoJbDFz9o3us= k8s.io/kubectl v0.29.3/go.mod h1:yCxfY1dbwgVdEt2zkJ6d5NNLOhhWgTyrqACIoFhpdd4= k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI= k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -oras.land/oras-go v1.2.4 h1:djpBY2/2Cs1PV87GSJlxv4voajVOMZxqqtq9AB8YNvY= -oras.land/oras-go v1.2.4/go.mod h1:DYcGfb3YF1nKjcezfX2SNlDAeQFKSXmf+qrFmrh4324= +oras.land/oras-go 
v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo= +oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo= sigs.k8s.io/controller-runtime v0.13.1-0.20230315234915-a26de2d610c3 h1:fic0YtUGSr79nv8vn3ziNZJrPZsm64KT/Fd/bc7Q6xY= sigs.k8s.io/controller-runtime v0.13.1-0.20230315234915-a26de2d610c3/go.mod h1:Qox07m8Gh7skSeOfppEWllPxNMhA7+b93D8Qjj6rBlQ= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= diff --git a/test/e2e/framework/azure/create-cluster-with-npm.go b/test/e2e/framework/azure/create-cluster-with-npm.go index 4c2c62609d..709ed3bfb1 100644 --- a/test/e2e/framework/azure/create-cluster-with-npm.go +++ b/test/e2e/framework/azure/create-cluster-with-npm.go @@ -18,9 +18,11 @@ var ( ) const ( - clusterTimeout = 10 * time.Minute - AgentARMSKU = "Standard_D4pls_v5" - AuxilaryNodeCount = 1 + clusterTimeout = 10 * time.Minute + clusterCreateTicker = 30 * time.Second + pollFrequency = 5 * time.Second + AgentARMSKU = "Standard_D4pls_v5" + AuxilaryNodeCount = 1 ) type CreateNPMCluster struct { @@ -49,7 +51,8 @@ func (c *CreateNPMCluster) Run() error { npmCluster.Properties.NetworkProfile.NetworkPolicy = to.Ptr(armcontainerservice.NetworkPolicyAzure) - npmCluster.Properties.AgentPoolProfiles = append(npmCluster.Properties.AgentPoolProfiles, &armcontainerservice.ManagedClusterAgentPoolProfile{ + //nolint:appendCombine // separate for verbosity + npmCluster.Properties.AgentPoolProfiles = append(npmCluster.Properties.AgentPoolProfiles, &armcontainerservice.ManagedClusterAgentPoolProfile{ //nolint:all Type: to.Ptr(armcontainerservice.AgentPoolTypeVirtualMachineScaleSets), AvailabilityZones: []*string{to.Ptr("1")}, Count: to.Ptr[int32](AuxilaryNodeCount), @@ -64,6 +67,7 @@ func (c *CreateNPMCluster) Run() error { }) /* todo: add azlinux node pool + //nolint:appendCombine // separate for verbosity npmCluster.Properties.AgentPoolProfiles = append(npmCluster.Properties.AgentPoolProfiles, 
&armcontainerservice.ManagedClusterAgentPoolProfile{ Type: to.Ptr(armcontainerservice.AgentPoolTypeVirtualMachineScaleSets), AvailabilityZones: []*string{to.Ptr("1")}, @@ -78,7 +82,8 @@ func (c *CreateNPMCluster) Run() error { MaxPods: to.Ptr(int32(azure.MaxPodsPerNode)), }) */ - npmCluster.Properties.AgentPoolProfiles = append(npmCluster.Properties.AgentPoolProfiles, &armcontainerservice.ManagedClusterAgentPoolProfile{ + //nolint:appendCombine // separate for verbosity + npmCluster.Properties.AgentPoolProfiles = append(npmCluster.Properties.AgentPoolProfiles, &armcontainerservice.ManagedClusterAgentPoolProfile{ //nolint:all Type: to.Ptr(armcontainerservice.AgentPoolTypeVirtualMachineScaleSets), AvailabilityZones: []*string{to.Ptr("1")}, Count: to.Ptr[int32](AuxilaryNodeCount), @@ -116,7 +121,7 @@ func (c *CreateNPMCluster) Run() error { notifychan := make(chan struct{}) go func() { _, err = poller.PollUntilDone(ctx, &runtime.PollUntilDoneOptions{ - Frequency: 5 * time.Second, + Frequency: pollFrequency, }) if err != nil { log.Printf("failed to create cluster: %v\n", err) @@ -126,7 +131,7 @@ func (c *CreateNPMCluster) Run() error { close(notifychan) }() - ticker := time.NewTicker(30 * time.Second) + ticker := time.NewTicker(clusterCreateTicker) defer ticker.Stop() for { select { @@ -135,7 +140,7 @@ func (c *CreateNPMCluster) Run() error { case <-ticker.C: log.Printf("waiting for cluster %s to be ready...\n", c.ClusterName) case <-notifychan: - return err + return fmt.Errorf("received notification, failed to create cluster: %w", err) } } } diff --git a/test/e2e/framework/generic/load-tag.go b/test/e2e/framework/generic/load-tag.go index b5ee7652c3..d3131c2829 100644 --- a/test/e2e/framework/generic/load-tag.go +++ b/test/e2e/framework/generic/load-tag.go @@ -9,6 +9,8 @@ import ( const DefaultTagEnv = "TAG" +var ErrTagNotSet = fmt.Errorf("tag not set") + type LoadTag struct { TagEnv string } @@ -32,9 +34,8 @@ func (s *LoadTag) Prevalidate() error { log.Printf("using 
version \"%s\" from flag", tag) os.Setenv(s.TagEnv, tag) return nil - } else { - return fmt.Errorf("tag is not set from flag nor env %s", s.TagEnv) } + return fmt.Errorf("tag is not set from flag nor env %s: %w", s.TagEnv, ErrTagNotSet) } return nil } diff --git a/test/e2e/framework/kubernetes/get-logs.go b/test/e2e/framework/kubernetes/get-logs.go index 677eeb513b..7cd728af85 100644 --- a/test/e2e/framework/kubernetes/get-logs.go +++ b/test/e2e/framework/kubernetes/get-logs.go @@ -12,7 +12,7 @@ import ( "k8s.io/client-go/tools/clientcmd" ) -func PrintPodLogs(kubeconfigpath string, namespace string, labelSelector string) { +func PrintPodLogs(kubeconfigpath, namespace, labelSelector string) { // Load the kubeconfig file to get the configuration to access the cluster config, err := clientcmd.BuildConfigFromFlags("", kubeconfigpath) if err != nil { @@ -34,7 +34,8 @@ func PrintPodLogs(kubeconfigpath string, namespace string, labelSelector string) } // Iterate over the pods and get the logs for each pod - for _, pod := range pods.Items { + for i := range pods.Items { + pod := pods.Items[i] fmt.Printf("############################## logs for pod %s #########################\n", pod.Name) // Get the logs for the pod @@ -43,7 +44,6 @@ func PrintPodLogs(kubeconfigpath string, namespace string, labelSelector string) if err != nil { fmt.Printf("error getting logs for pod %s: %s\n", pod.Name, err) } - defer podLogs.Close() // Read the logs buf, err := io.ReadAll(podLogs) @@ -51,6 +51,8 @@ func PrintPodLogs(kubeconfigpath string, namespace string, labelSelector string) log.Printf("error reading logs for pod %s: %s\n", pod.Name, err) } + podLogs.Close() + // Print the logs log.Println(string(buf)) } diff --git a/test/e2e/framework/kubernetes/install-retina-helm.go b/test/e2e/framework/kubernetes/install-retina-helm.go index 93798386dd..70e74e6526 100644 --- a/test/e2e/framework/kubernetes/install-retina-helm.go +++ b/test/e2e/framework/kubernetes/install-retina-helm.go @@ -17,7 
+17,10 @@ const ( deleteTimeout = 60 * time.Second ) -var errEmptyTag = fmt.Errorf("tag is empty") +var ( + errEmptyTag = fmt.Errorf("tag is empty") + errDirectoryNotFound = fmt.Errorf("directory not found") +) type InstallHelmChart struct { Namespace string @@ -99,8 +102,8 @@ func (i *InstallHelmChart) Prevalidate() error { if err != nil { return fmt.Errorf("failed to get current working directory %s: %w", cwd, err) } - fmt.Printf("the current working directory %s", cwd) - return fmt.Errorf("directory not found at %s: working directory: %s", i.ChartPath, cwd) + log.Printf("the current working directory %s", cwd) + return fmt.Errorf("directory not found at %s: working directory: %s: %w", i.ChartPath, cwd, errDirectoryNotFound) } log.Printf("found chart at %s", i.ChartPath) diff --git a/test/e2e/framework/prometheus/prometheus.go b/test/e2e/framework/prometheus/prometheus.go index 645ec2cd8f..603f0d96ac 100644 --- a/test/e2e/framework/prometheus/prometheus.go +++ b/test/e2e/framework/prometheus/prometheus.go @@ -21,7 +21,7 @@ var ( defaultRetryAttempts = 60 ) -func CheckMetric(promAddress string, metricName string, validMetric map[string]string) error { +func CheckMetric(promAddress, metricName string, validMetric map[string]string) error { defaultRetrier := retry.Retrier{Attempts: defaultRetryAttempts, Delay: defaultRetryDelay} ctx := context.Background() diff --git a/test/e2e/scenarios/retina/retina_scenarios_test.go b/test/e2e/scenarios/retina/retina_scenarios_test.go index 02595d129c..8a4ac2dca8 100644 --- a/test/e2e/scenarios/retina/retina_scenarios_test.go +++ b/test/e2e/scenarios/retina/retina_scenarios_test.go @@ -67,9 +67,9 @@ func TestE2ERetinaMetrics(t *testing.T) { // todo: enable mutating images in helm chart job.AddStep(&kubernetes.InstallHelmChart{ - Namespace: "kube-system", - ReleaseName: "retina", - ChartPath: "../../../../deploy/manifests/controller/helm/retina/", + Namespace: "kube-system", + ReleaseName: "retina", + ChartPath: 
"../../../../deploy/manifests/controller/helm/retina/", }, nil) job.AddScenario(drop.ValidateDropMetric()) diff --git a/test/e2e/scenarios/retina/tcp/validate-flow-metric.go b/test/e2e/scenarios/retina/tcp/validate-flow-metric.go index 4af54b766b..9ade9947b9 100644 --- a/test/e2e/scenarios/retina/tcp/validate-flow-metric.go +++ b/test/e2e/scenarios/retina/tcp/validate-flow-metric.go @@ -14,7 +14,7 @@ const ( established = "ESTABLISHED" listen = "LISTEN" - time_wait = "TIME_WAIT" + timewait = "TIME_WAIT" ) type ValidateRetinaTCPStateMetric struct { @@ -27,7 +27,7 @@ func (v *ValidateRetinaTCPStateMetric) Run() error { validMetrics := []map[string]string{ {state: established}, {state: listen}, - {state: time_wait}, + {state: timewait}, } for _, metric := range validMetrics { diff --git a/test/retry/retry.go b/test/retry/retry.go index 7b46f9266e..72a6b3f43c 100644 --- a/test/retry/retry.go +++ b/test/retry/retry.go @@ -3,6 +3,7 @@ package retry import ( "context" + "fmt" "time" ) @@ -32,7 +33,11 @@ func (r Retrier) Do(ctx context.Context, f func() error) error { select { case <-ctx.Done(): - return ctx.Err() + ctxErr := ctx.Err() + if ctxErr != nil { + return fmt.Errorf("context error: %w", ctxErr) + } + return nil case <-done: return err }