diff --git a/.github/CHANGELOG.md b/.github/CHANGELOG.md index 67c6a87a..908dd819 100644 --- a/.github/CHANGELOG.md +++ b/.github/CHANGELOG.md @@ -1,42 +1,84 @@ # Change Log All notable changes to this project will be documented in this file. -## [v0.21] - 2021-06-16 - -1e430f3 additional check in setting maxUnavailable (#255) - -## [v0.20] - 2021-06-14 - -3c70352 Upgrade to go 1.16 (#184) -42cf8d4 Improve warning logs (#242) -fce274e fix: status update failing due to unomitted fields (#251) - -## [v0.19] - 2021-05-18 - -5860eb7 #2285: renamed some methods related to metrics (#223) -4da0472 feat: add go-env support (#207) -cc1d2f6 fix: documentation and minor naming changes (#208) -3c26418 fix: log testenv errors (#209) -67cbab6 fix: logging improvements (#211) -708d88e fix: documentation improvements (#210) -95e28cd #2285: updated metric namespace for consistency with others (#219) -30b9ad9 #2285: added CR status metrics (#217) -51e3ff8 Issue 2108: step duration metrics to v1 (#216) -a1af90b Cache the ASG before nodes are rotated in a loop (#212) - -## [v0.18] - 2021-03-23 - -8b2d320 Fix for Launch definition validation. Consider only the "InService" instances. (#197) -42f810c Fail the CR for drain failures, when IgnoreDrainFailures isn't set. (#185) -f5c9457 output can contain other messages from API Server, so be more relaxed (#174) -391b2fb Expose template list and other execution errors to logs (#166) -757b669 Bump golang and busybox (#172) -b8f69e8 Add instance id to the logs (#173) -ac7be6b Fix namespaced name order (#170) -51f469d use standard fmt.Errorf to format error message; unify error format (#171) -b552c69 Bump dependencies. (#169) -36a2784 Remove separate module for pkg/log (#168) -237f93d Move constants to types so that they can be reused (#167) +## [v1.0.4] - 2021-10-04 +995b81b controller flags for ignoreDrainFailures and drainTimeout (#307) + + +## [v1.0.3] - 2021-09-03 +6252725 revert #300 (#305) +df08ab0 Set Instances to StandBy in batches (#303) +e77431c fix: fix panic when using MixedInstancesPolicy (#298) +1e6d29d Add ignoreDrainFailure and DrainTimeout as controller arguements (#300) + + +## [v1.0.2] - 2021-08-05 +d73da1b replace launchTemplate latest string with version number (#296) + +## [v1.0.1] - 2021-08-05 +52d80d9 check for ASG's launch template version instead latest. (#293) +c35445d Controller v2: fix BDD template and update Dockerfile with bash (#292) +db54e0b Controller v2: fix BDD template (#291) +b698dd6 Controller v2: remove cleaning up ruObject as BDD already does. (#290) +86412d5 Controller v2: increase memory/CPU limit and update args (#289) +2d8651c Controller v2: update args (#288) +835fd0d V2 bdd (#286) +998de0d V2 bdd (#285) +3841cc7 #2122: bdd changes for v2 (#284) +93626b4 Controller v2: BDD cron update (#283) +1be8190 Controller v2: BDD cron update (#282) +62c2255 Controller v2: BDD cron update (#280) +42abe52 Controller v2: BDD cron update (#279) +5bdc134 Controller v2 bdd changes (#278) + +## [v1.0.0] - 2021-07-21 + 7a4766d (HEAD -> controller-v2, origin/controller-v2) upgrade-manager-v2: Add CI github action, fix lint errors. (#276) + 00f7e89 upgrade-manager-v2: Fix unit tests (#275) + 0e64929 upgrade-manager-v2: Process next batch while waiting on nodeInterval period. 
(#273)
+ b2b39a0 upgrade-manager-v2: Add nodeEvents handler instead of a watch handler (#272)
+ c0a163b move cloud discovery after nodeInterval / drainInterval wait (#270)
+ b15838e Carry the metrics status in RollingUpgrade CR (#267)
+ 610f454 upgrade-manager-v2: remove function duplicate declaration. (#266)
+ a4e0e84 upgrade-manager-v2: expose totalProcessing time and other metrics (#265)
+ 2390ea0 and CR end time (#264)
+ 79db022 (tag: v1.0.0-RC1) Add a mock for test and update version in Makefile (#262)
+ 3eafd00 Fix metrics calculation issue (#258)
+ 376657f Revert "Fix metrics collecting issue (#249)" (#256)
+ f5dd1cb Fix metrics collecting issue (#249)
+ 066731d final push before RC release. (#254)
+ 18e0e75 upgrade-manager-v2: Load test fixes (#245)
+ 1fc5847 metricsMutex should be initialized (#240)
+ a9ac50f add missing parenthesis (#239)
+ 6fef5fd V2 controller metrics concurrency fix (#231)
+ a490333 upgrade-manager-v2: Move DrainManager back to Reconciler (#236)
+ b659e0f Resolve compile errors caused by merge conflict. (#235)
+ b664fdd Create RollingUpgradeContext (#234)
+ b8d0e72 #2286: removed version from metric namespace (#227)
+ c445af9 #2285: renamed some methods related to metrics (#224)
+ 1f0f075 #2285: rollup CR statistic metrics in v2 (#218)
+ d5935e3 Unit tests for controller-v2 (#215)
+ 665c64b Fix bug in deleting the entry in syncMap (#203)
+ 77f985c Ignore generated code (#201)
+ 71b310a Refine metrics implementation to support goroutines (#196)
+ 668c5d8 Move the DrainManager within ReplaceBatch(), to access one per RollingUpgrade CR (#195)
+ 728dae9 Process the batch rotation in parallel (#192)
+ 14e950e Metrics features (#189)
+ 11d3ae6 Eager mode implementation (#183)
+ 57df5a5 Implemented node drain. (#181)
+ dd6a332 Migrate Script Runner (#179)
+ 2c1d8e7 Controller v2: Implementation of Instance termination (#178)
+ 7cb15b0 Fix all the "make vet" errors in Controller V2 branch. (#177)
+ 59e9b0d Implemented RollingUpgrade object validation. (#176)
+ 5cb9efb initial rotation logic
+ 6b8dad5 AWS API calls & Drift detection
+ 335fb4f aws API calls
+ 41bd571 Add kubernetes API calls
+ 8f33f1e add more scaffolding
+ 25644a6 initial code
+ 87afbd6 add API
+ 2816490 scaffolding
+ 3ad13b8 delete all
+ 6ce7953 Delete README.md
 
 ## [v0.17] - 2020-12-11
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 2f0c9165..68907d1f 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -1,18 +1,14 @@
 # How to contribute
 
-## Development
-- Open an issue and discuss the feature / bug you want
-- Fork ()
-- Clone your fork
-- Install [kubebuilder](https://book.kubebuilder.io/quick-start.html)
-- `make test` to ensure everything is working
-- Create your feature branch (`git checkout -b feature/fooBar`)
-- Implement your change and add tests
-- `make test` to ensure everything is working
-- Commit your changes (`git commit -am 'Add some fooBar'`)
-- Add a "Testing Done" section in the commit message. Be as explicit as possible about all the manual and automated tests performed.
-- Push to the branch (`git push origin feature/fooBar`)
-- Create a new Pull Request
+1. Fork it ()
+   - Add your fork as a git remote named, say `reviews`.
+2. Open an issue and discuss the feature / bug
+3. Create your feature branch in your fork (`git checkout -b feature/fooBar`)
+4. Commit your changes (`git commit -am 'Add some fooBar'`)
+5. Add a "Testing Done" section in the commit message. Be as explicit as possible about all the manual and automated tests performed.
+6. Push to the branch (`git push reviews feature/fooBar`) +7. Make sure unit tests and any static-analysis (linting) tests are passing +8. Create a new Pull Request ## How to report a bug diff --git a/.github/workflows/bdd.yaml b/.github/workflows/bdd.yaml index 85a1fbcd..47b78ad9 100644 --- a/.github/workflows/bdd.yaml +++ b/.github/workflows/bdd.yaml @@ -1,8 +1,9 @@ name: BDD on: - schedule: - - cron: '0 7 * * *' # UTC is being used, 07:00 am would be 12:00am in PT + push: + branches: + - master jobs: build: @@ -41,4 +42,5 @@ jobs: $HOME/go/bin/godog - name: Cleanup - run: kubectl delete deployment upgrade-manager-controller-manager -n upgrade-manager-system \ No newline at end of file + run: | + kubectl delete deployment upgrade-manager-controller-manager -n upgrade-manager-system diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 620e29ff..6c9d8b7b 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -14,6 +14,7 @@ on: jobs: build: name: CI # Lint, Test, Codecov, Docker build & Push + if: github.repository == 'keikoproj/upgrade-manager' runs-on: ubuntu-latest steps: @@ -39,7 +40,6 @@ jobs: mv kubebuilder_${version}_linux_${arch} kubebuilder && sudo mv kubebuilder /usr/local/ # update your PATH to include /usr/local/kubebuilder/bin export PATH=$PATH:/usr/local/kubebuilder/bin - - name: Run Tests run: make test diff --git a/.gitignore b/.gitignore index b16e66dd..cebb35f8 100644 --- a/.gitignore +++ b/.gitignore @@ -12,10 +12,11 @@ bin # Output of the go coverage tool, specifically when used with LiteIDE *.out -coverage.txt # Kubernetes Generated files - skip generated files, except for vendored files + !vendor/**/zz_generated.* +!api/**/zz_generated.* # editor and IDE paraphernalia .idea diff --git a/.go-version b/.go-version deleted file mode 100644 index 04cc9994..00000000 --- a/.go-version +++ /dev/null @@ -1 +0,0 @@ -1.15.6 diff --git a/Dockerfile b/Dockerfile index 15f166b7..382fc5e7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Build the manager binary -FROM golang:1.16 as builder +FROM golang:1.15 as builder WORKDIR /workspace # Copy the Go Modules manifests @@ -7,7 +7,6 @@ COPY go.mod go.mod COPY go.sum go.sum # cache deps before building and copying source so that we don't need to re-download as much # and so that source changes don't invalidate our downloaded layer -COPY pkg pkg RUN go mod download # Copy the go source @@ -18,19 +17,15 @@ COPY controllers/ controllers/ # Build RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o manager main.go -# Add kubectl -RUN curl -L https://storage.googleapis.com/kubernetes-release/release/v1.14.10/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl -RUN chmod +x /usr/local/bin/kubectl - # Add busybox FROM busybox:1.32.1 as shelladder # Use distroless as minimal base image to package the manager binary # Refer to https://github.com/GoogleContainerTools/distroless for more details -FROM gcr.io/distroless/static:latest +FROM gcr.io/distroless/static:nonroot WORKDIR / +COPY --from=builder /workspace/manager . +USER 65532:65532 COPY --from=shelladder /bin/sh /bin/sh -COPY --from=builder /workspace/manager . -COPY --from=builder /usr/local/bin/kubectl /usr/local/bin/kubectl ENTRYPOINT ["/manager"] diff --git a/LICENSE b/LICENSE deleted file mode 100644 index bbfa79ac..00000000 --- a/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. 
Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2019 The KeikoProj Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/Makefile b/Makefile index 6e0f3cac..643368d9 100644 --- a/Makefile +++ b/Makefile @@ -1,39 +1,50 @@ -VERSION=0.21 +VERSION=1.0.4 # Image URL to use all building/pushing image targets -IMG ?= keikoproj/rolling-upgrade-controller:${VERSION} +IMG ?= controller:latest # Produce CRDs that work back to Kubernetes 1.11 (no version conversion) -CRD_OPTIONS ?= "crd:trivialVersions=true" +CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false" -export GO111MODULE = on +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif all: manager # Run tests +ENVTEST_ASSETS_DIR=$(shell pwd)/testbin test: generate fmt vet manifests - go test -v ./api/... ./controllers/... -coverprofile coverage.txt + mkdir -p ${ENVTEST_ASSETS_DIR} + test -f ${ENVTEST_ASSETS_DIR}/setup-envtest.sh || curl -sSLo ${ENVTEST_ASSETS_DIR}/setup-envtest.sh https://raw.githubusercontent.com/kubernetes-sigs/controller-runtime/v0.7.0/hack/setup-envtest.sh + source ${ENVTEST_ASSETS_DIR}/setup-envtest.sh; fetch_envtest_tools $(ENVTEST_ASSETS_DIR); setup_envtest_env $(ENVTEST_ASSETS_DIR); go test ./... -coverprofile coverage.txt go tool cover -html=./coverage.txt -o cover.html -# Run golangci lint tests -lint: - golangci-lint run ./... -.PHONY: lint - # Build manager binary manager: generate fmt vet go build -o bin/manager main.go # Run against the configured Kubernetes cluster in ~/.kube/config -run: generate fmt vet +run: generate fmt vet manifests go run ./main.go # Install CRDs into a cluster -install: manifests - kubectl apply -f config/crd/bases +install: manifests kustomize + $(KUSTOMIZE) build config/crd | kubectl apply -f - + +# Uninstall CRDs from a cluster +uninstall: manifests kustomize + $(KUSTOMIZE) build config/crd | kubectl delete -f - # Deploy controller in the configured Kubernetes cluster in ~/.kube/config -deploy: manifests - kubectl apply -f config/crd/bases - kustomize build config/default | kubectl apply -f - +deploy: manifests kustomize + cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} + $(KUSTOMIZE) build config/default | kubectl apply -f - + +# UnDeploy controller from the configured Kubernetes cluster in ~/.kube/config +undeploy: + $(KUSTOMIZE) build config/default | kubectl delete -f - # Generate manifests e.g. CRD, RBAC etc. manifests: controller-gen @@ -49,27 +60,36 @@ vet: # Generate code generate: controller-gen - $(CONTROLLER_GEN) object:headerFile=./hack/boilerplate.go.txt paths=./api/... + $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." # Build the docker image -docker-build: - docker build . -t ${IMG} - docker tag ${IMG} keikoproj/rolling-upgrade-controller:latest - @echo "updating kustomize image patch file for manager resource" - sed -i'' -e 's@image: .*@image: '"${IMG}"'@' ./config/default/manager_image_patch.yaml +docker-build: test + docker build -t ${IMG} . 
# Push the docker image docker-push: docker push ${IMG} -# find or download controller-gen -# download controller-gen if necessary +# Download controller-gen locally if necessary +CONTROLLER_GEN = $(shell pwd)/bin/controller-gen controller-gen: -ifeq (, $(shell which controller-gen)) - export GO111MODULE=off # https://stackoverflow.com/questions/54415733/getting-gopath-error-go-cannot-use-pathversion-syntax-in-gopath-mode-in-ubun - go clean -modcache - go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.2.4 -CONTROLLER_GEN=$(shell go env GOPATH)/bin/controller-gen -else -CONTROLLER_GEN=$(shell which controller-gen) -endif + $(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.4.1) + +# Download kustomize locally if necessary +KUSTOMIZE = $(shell pwd)/bin/kustomize +kustomize: + $(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v3@v3.8.7) + +# go-get-tool will 'go get' any package $2 and install it to $1. +PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) +define go-get-tool +@[ -f $(1) ] || { \ +set -e ;\ +TMP_DIR=$$(mktemp -d) ;\ +cd $$TMP_DIR ;\ +go mod init tmp ;\ +echo "Downloading $(2)" ;\ +GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\ +rm -rf $$TMP_DIR ;\ +} +endef diff --git a/PROJECT b/PROJECT index 9a80e1c2..2cbd982d 100644 --- a/PROJECT +++ b/PROJECT @@ -1,7 +1,11 @@ -version: "2" domain: keikoproj.io +layout: go.kubebuilder.io/v3 +projectName: upgrade-manager repo: github.com/keikoproj/upgrade-manager resources: -- group: upgrademgr - version: v1alpha1 +- api: + crdVersion: v1 + group: upgrademgr kind: RollingUpgrade + version: v1alpha1 +version: 3-alpha diff --git a/README.md b/README.md deleted file mode 100644 index d5fe45e2..00000000 --- a/README.md +++ /dev/null @@ -1,84 +0,0 @@ -# RollingUpgrade - -![Build Status](https://github.com/keikoproj/upgrade-manager/workflows/Build-Test/badge.svg) ![Build Status](https://github.com/keikoproj/upgrade-manager/workflows/BDD/badge.svg) [![codecov](https://codecov.io/gh/keikoproj/upgrade-manager/branch/master/graph/badge.svg)](https://codecov.io/gh/keikoproj/upgrade-manager) - -> Reliable, extensible rolling-upgrades of Autoscaling groups in Kubernetes - -RollingUpgrade provides a Kubernetes native mechanism for doing rolling-updates of instances in an AutoScaling group using a CRD and a controller. - -## What does it do? - -- RollingUpgrade is highly inspired by the way kops does rolling-updates. - -- It provides similar options for the rolling-updates as kops and more. - -- The RollingUpgrade Kubernetes custom resource has the following options in the spec: - - `asgName`: Name of the autoscaling group to perform the rolling-update. - - `preDrain.script`: The script to run before draining a node. - - `postDrain.script`: The script to run after draining a node. This allows for performing actions such as quiescing network traffic, adding labels, etc. - - `postDrain.waitSeconds`: The seconds to wait after a node is drained. - - `postDrain.postWaitScript`: The script to run after the node is drained and the waitSeconds have passed. This can be used for ensuring that the drained pods actually were able to start elsewhere. - - `nodeIntervalSeconds`: The amount of time in seconds to wait after each node in the ASG is terminated. - - `postTerminate.script`: Optional bash script to execute after the node has terminated. 
- - `strategy.mode`: This field is optional and allows for two possible modes - - `lazy` - this is the default mode, upgrade will terminate an instance first. - - `eager` - upgrade will launch an instance prior to terminating. - - `strategy.type`: This field is optional and currently two strategies are supported - - `randomUpdate` - Default is type is not specified. Picks nodes randomly for updating. Refer to [random_update_strategy.yaml](examples/random_update_strategy.yaml) for sample custom resource definition. - - `uniformAcrossAzUpdate` - Picks same number of nodes or same percentage of nodes from each AZ for update. Refer to [uniform_across_az_update_strategy.yaml](examples/uniform_across_az_update_strategy.yaml) for sample custom resource definition. - - `strategy.maxUnavailable`: Optional field. The number of nodes that can be unavailable during rolling upgrade, can be specified as number of nodes or the percent of total number of nodes. Default is "1". - - `strategy.drainTimeout`: Optional field. Node will be terminated after drain timeout even if `kubectl drain` has not been completed and value has to be specified in seconds. Default is -1. - -- After performing the rolling-update of the nodes in the ASG, RollingUpgrade puts the following data in the "Status" field. - - `currentStatus`: Whether the rolling-update completed or errored out. - - `startTime`: The RFC3339 timestamp when the rolling-update began. E.g. 2019-01-15T23:51:10Z - - `endTime`: The RFC3339 timestamp when the rolling-update completed. E.g. 2019-01-15T00:35:10Z - - `nodesProcessed`: The number of ec2 instances that were processed. - - `conditions`: Conditions describing the lifecycle of the rolling-update. - -## Design - -For each RollingUpgrade custom resource that is submitted, the following flowchart shows the sequence of actions taken to [perform the rolling-update](docs/RollingUpgradeDesign.png) - -## Dependencies - -- Kubernetes cluster on AWS with nodes in AutoscalingGroups. rolling-upgrades have been tested with Kubernetes clusters v1.12+. -- An IAM role with at least the policy specified below. The upgrade-manager should be run with that IAM role. - -## Installing - -### Complete step by step guide to create a cluster and run rolling-upgrades - -For a complete, step by step guide for creating a cluster with kops, editing it and then running rolling-upgrades, please see [this](docs/step-by-step-example.md) - -### Existing cluster in AWS - -If you already have an existing cluster created using kops, follow the instructions below. - -- Ensure that you have a Kubernetes cluster on AWS. -- Install the CRD using: `kubectl apply -f https://raw.githubusercontent.com/keikoproj/upgrade-manager/master/config/crd/bases/upgrademgr.keikoproj.io_rollingupgrades.yaml` -- Install the controller using: -`kubectl create -f https://raw.githubusercontent.com/keikoproj/upgrade-manager/master/deploy/rolling-upgrade-controller-deploy.yaml` - -- Note that the rolling-upgrade controller requires an IAM role with the following policy - -``` json -{ - "Effect": "Allow", - "Action": [ - "ec2:CreateTags", - "ec2:DescribeInstances", - "autoscaling:EnterStandby", - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:TerminateInstanceInAutoScalingGroup" - ], - "Resource": [ - "*" - ] -} -``` - -- If the rolling-upgrade controller is directly using the IAM role of the node it runs on, the above policy will have to be added to the IAM role of the node. 
-- If the rolling-upgrade controller is using it's own role created using KIAM, that role should have the above policy in it. - -## For more details and FAQs, refer to [this](docs/faq.md) diff --git a/api/v1alpha1/groupversion_info.go b/api/v1alpha1/groupversion_info.go index 1c1f3f62..dd7c9b64 100644 --- a/api/v1alpha1/groupversion_info.go +++ b/api/v1alpha1/groupversion_info.go @@ -1,4 +1,5 @@ /* +Copyright 2021 Intuit Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,8 +15,8 @@ limitations under the License. */ // Package v1alpha1 contains API Schema definitions for the upgrademgr v1alpha1 API group -// +kubebuilder:object:generate=true -// +groupName=upgrademgr.keikoproj.io +//+kubebuilder:object:generate=true +//+groupName=upgrademgr.keikoproj.io package v1alpha1 import ( diff --git a/api/v1alpha1/rollingupgrade_types.go b/api/v1alpha1/rollingupgrade_types.go index 3ee64cd6..53e359be 100644 --- a/api/v1alpha1/rollingupgrade_types.go +++ b/api/v1alpha1/rollingupgrade_types.go @@ -1,4 +1,5 @@ /* +Copyright 2021 Intuit Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,7 +18,8 @@ package v1alpha1 import ( "fmt" - "sync" + "strconv" + "strings" "time" "github.com/keikoproj/upgrade-manager/controllers/common" @@ -26,62 +28,37 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" ) -// PreDrainSpec contains the fields for actions taken before draining the node. -type PreDrainSpec struct { - Script string `json:"script,omitempty"` -} - -// PostDrainSpec contains the fields for actions taken after draining the node. -type PostDrainSpec struct { - Script string `json:"script,omitempty"` - WaitSeconds int64 `json:"waitSeconds,omitempty"` - PostWaitScript string `json:"postWaitScript,omitempty"` -} - -// PostTerminateSpec contains the fields for actions taken after terminating the node. -type PostTerminateSpec struct { - Script string `json:"script,omitempty"` -} - // RollingUpgradeSpec defines the desired state of RollingUpgrade type RollingUpgradeSpec struct { - PostDrainDelaySeconds int `json:"postDrainDelaySeconds,omitempty"` - NodeIntervalSeconds int `json:"nodeIntervalSeconds,omitempty"` - // AsgName is AWS Autoscaling Group name to roll. - AsgName string `json:"asgName,omitempty"` - PreDrain PreDrainSpec `json:"preDrain,omitempty"` - PostDrain PostDrainSpec `json:"postDrain,omitempty"` - PostTerminate PostTerminateSpec `json:"postTerminate,omitempty"` - Strategy UpdateStrategy `json:"strategy,omitempty"` - // IgnoreDrainFailures allows ignoring node drain failures and proceed with rolling upgrade. - IgnoreDrainFailures bool `json:"ignoreDrainFailures,omitempty"` - // ForceRefresh enables draining and terminating the node even if the launch config/template hasn't changed. - ForceRefresh bool `json:"forceRefresh,omitempty"` - // ReadinessGates allow to specify label selectors that node must match to be considered ready. 
- ReadinessGates []NodeReadinessGate `json:"readinessGates,omitempty"` -} - -type NodeReadinessGate struct { - MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"` + PostDrainDelaySeconds int `json:"postDrainDelaySeconds,omitempty"` + NodeIntervalSeconds int `json:"nodeIntervalSeconds,omitempty"` + AsgName string `json:"asgName,omitempty"` + PreDrain PreDrainSpec `json:"preDrain,omitempty"` + PostDrain PostDrainSpec `json:"postDrain,omitempty"` + PostTerminate PostTerminateSpec `json:"postTerminate,omitempty"` + Strategy UpdateStrategy `json:"strategy,omitempty"` + IgnoreDrainFailures *bool `json:"ignoreDrainFailures,omitempty"` + ForceRefresh bool `json:"forceRefresh,omitempty"` + ReadinessGates []NodeReadinessGate `json:"readinessGates,omitempty"` } // RollingUpgradeStatus defines the observed state of RollingUpgrade type RollingUpgradeStatus struct { - CurrentStatus string `json:"currentStatus,omitempty"` - StartTime string `json:"startTime,omitempty"` - EndTime string `json:"endTime,omitempty"` - TotalProcessingTime string `json:"totalProcessingTime,omitempty"` - NodesProcessed int `json:"nodesProcessed,omitempty"` - TotalNodes int `json:"totalNodes,omitempty"` - + CurrentStatus string `json:"currentStatus,omitempty"` + StartTime string `json:"startTime,omitempty"` + EndTime string `json:"endTime,omitempty"` + TotalProcessingTime string `json:"totalProcessingTime,omitempty"` + NodesProcessed int `json:"nodesProcessed,omitempty"` + TotalNodes int `json:"totalNodes,omitempty"` + CompletePercentage string `json:"completePercentage,omitempty"` Conditions []RollingUpgradeCondition `json:"conditions,omitempty"` LastNodeTerminationTime *metav1.Time `json:"lastTerminationTime,omitempty"` LastNodeDrainTime *metav1.Time `json:"lastDrainTime,omitempty"` Statistics []*RollingUpgradeStatistics `json:"statistics,omitempty"` // For backward compatibility - LastBatchNodes []string `json:"lastBatchNodes,omitempty"` - //NodeInProcessing map[string]*NodeInProcessing `json:"nodeInProcessing,omitempty"` + LastBatchNodes []string `json:"lastBatchNodes,omitempty"` + NodeInProcessing map[string]*NodeInProcessing `json:"nodeInProcessing,omitempty"` } // RollingUpgrade Statistics, includes summary(sum/count) from each step @@ -91,15 +68,96 @@ type RollingUpgradeStatistics struct { DurationCount int32 `json:"durationCount,omitempty"` } +// RollingUpgrade Node step information +type NodeStepDuration struct { + GroupName string `json:"groupName,omitempty"` + NodeName string `json:"nodeName,omitempty"` + StepName RollingUpgradeStep `json:"stepName,omitempty"` + Duration metav1.Duration `json:"duration,omitempty"` +} + +// Node In-processing +type NodeInProcessing struct { + NodeName string `json:"nodeName,omitempty"` + StepName RollingUpgradeStep `json:"stepName,omitempty"` + UpgradeStartTime metav1.Time `json:"upgradeStartTime,omitempty"` + StepStartTime metav1.Time `json:"stepStartTime,omitempty"` + StepEndTime metav1.Time `json:"stepEndTime,omitempty"` +} + +func (s *RollingUpgradeStatus) SetCondition(cond RollingUpgradeCondition) { + // if condition exists, overwrite, otherwise append + for ix, c := range s.Conditions { + if c.Type == cond.Type { + s.Conditions[ix] = cond + return + } + } + s.Conditions = append(s.Conditions, cond) +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=rollingupgrades,scope=Namespaced,shortName=ru +// 
+kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.currentStatus",description="current status of the rollingupgrade"
+// +kubebuilder:printcolumn:name="TotalNodes",type="string",JSONPath=".status.totalNodes",description="total nodes involved in the rollingupgrade"
+// +kubebuilder:printcolumn:name="NodesProcessed",type="string",JSONPath=".status.nodesProcessed",description="current number of nodes processed in the rollingupgrade"
+// +kubebuilder:printcolumn:name="Complete",type="string",JSONPath=".status.completePercentage",description="percentage of completion for the rollingupgrade CR"
+
+// RollingUpgrade is the Schema for the rollingupgrades API
+type RollingUpgrade struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   RollingUpgradeSpec   `json:"spec,omitempty"`
+	Status RollingUpgradeStatus `json:"status,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+
+// RollingUpgradeList contains a list of RollingUpgrade
+type RollingUpgradeList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []RollingUpgrade `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&RollingUpgrade{}, &RollingUpgradeList{})
+}
+
+// PreDrainSpec contains the fields for actions taken before draining the node.
+type PreDrainSpec struct {
+	Script string `json:"script,omitempty"`
+}
+
+// PostDrainSpec contains the fields for actions taken after draining the node.
+type PostDrainSpec struct {
+	Script         string `json:"script,omitempty"`
+	WaitSeconds    int64  `json:"waitSeconds,omitempty"`
+	PostWaitScript string `json:"postWaitScript,omitempty"`
+}
+
+// PostTerminateSpec contains the fields for actions taken after terminating the node.
+type PostTerminateSpec struct {
+	Script string `json:"script,omitempty"`
+}
+
+type NodeReadinessGate struct {
+	MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"`
+}
+
 type RollingUpgradeStep string
 
 const (
-	// StatusRunning marks the CR to be running.
-	StatusRunning = "running"
-	// StatusComplete marks the CR as completed.
+	// Status
+	StatusInit     = "init"
+	StatusRunning  = "running"
 	StatusComplete = "completed"
-	// StatusError marks the CR as errored out.
- StatusError = "error" + StatusError = "error" + + // Conditions + UpgradeComplete UpgradeConditionType = "Complete" NodeRotationTotal RollingUpgradeStep = "total" @@ -111,6 +169,7 @@ const ( NodeRotationPostWait RollingUpgradeStep = "post_wait" NodeRotationTerminate RollingUpgradeStep = "terminate" NodeRotationPostTerminate RollingUpgradeStep = "post_terminate" + NodeRotationTerminated RollingUpgradeStep = "terminated" NodeRotationCompleted RollingUpgradeStep = "completed" ) @@ -123,206 +182,203 @@ var NodeRotationStepOrders = map[RollingUpgradeStep]int{ NodeRotationPostWait: 60, NodeRotationTerminate: 70, NodeRotationPostTerminate: 80, + NodeRotationTerminated: 90, NodeRotationCompleted: 1000, } +var ( + FiniteStates = []string{StatusComplete, StatusError} + AllowedStrategyType = []string{string(RandomUpdateStrategy), string(UniformAcrossAzUpdateStrategy)} + AllowedStrategyMode = []string{string(UpdateStrategyModeLazy), string(UpdateStrategyModeEager)} + DefaultRequeueTime = time.Second * 30 +) + // RollingUpgradeCondition describes the state of the RollingUpgrade type RollingUpgradeCondition struct { Type UpgradeConditionType `json:"type,omitempty"` Status corev1.ConditionStatus `json:"status,omitempty"` } -// RollingUpgrade Node step information -type NodeStepDuration struct { - GroupName string `json:"groupName,omitempty"` - NodeName string `json:"nodeName,omitempty"` - StepName RollingUpgradeStep `json:"stepName,omitempty"` - Duration metav1.Duration `json:"duration,omitempty"` +type UpdateStrategyType string +type UpdateStrategyMode string +type UpgradeConditionType string + +const ( + RandomUpdateStrategy UpdateStrategyType = "randomUpdate" + UniformAcrossAzUpdateStrategy UpdateStrategyType = "uniformAcrossAzUpdate" + + UpdateStrategyModeLazy UpdateStrategyMode = "lazy" + UpdateStrategyModeEager UpdateStrategyMode = "eager" +) + +// UpdateStrategy holds the information needed to perform update based on different update strategies +type UpdateStrategy struct { + Type UpdateStrategyType `json:"type,omitempty"` + Mode UpdateStrategyMode `json:"mode,omitempty"` + MaxUnavailable intstr.IntOrString `json:"maxUnavailable,omitempty"` + DrainTimeout *int `json:"drainTimeout,omitempty"` } -// Node In-processing -type NodeInProcessing struct { - NodeName string `json:"nodeName,omitempty"` - StepName RollingUpgradeStep `json:"stepName,omitempty"` - UpgradeStartTime metav1.Time `json:"upgradeStartTime,omitempty"` - StepStartTime metav1.Time `json:"stepStartTime,omitempty"` - StepEndTime metav1.Time `json:"stepEndTime,omitempty"` +func (c UpdateStrategyMode) String() string { + return string(c) } -// Update last batch nodes -func (s *RollingUpgradeStatus) UpdateLastBatchNodes(batchNodes map[string]*NodeInProcessing) { - //s.NodeInProcessing = batchNodes - keys := make([]string, 0, len(batchNodes)) - for k := range batchNodes { - keys = append(keys, k) - } - s.LastBatchNodes = keys +// NamespacedName returns namespaced name of the object. 
+func (r *RollingUpgrade) NamespacedName() string { + return fmt.Sprintf("%s/%s", r.Namespace, r.Name) } -// Update Node Statistics -func (s *RollingUpgradeStatus) UpdateStatistics(nodeSteps map[string][]NodeStepDuration) { - for _, v := range nodeSteps { - for _, step := range v { - s.AddNodeStepDuration(step) - } - } +func (r *RollingUpgrade) ScalingGroupName() string { + return r.Spec.AsgName } -// Add one step duration -func (s *RollingUpgradeStatus) AddNodeStepDuration(nsd NodeStepDuration) { - // if step exists, add count and sum, otherwise append - for _, s := range s.Statistics { - if s.StepName == nsd.StepName { - s.DurationSum = metav1.Duration{ - Duration: s.DurationSum.Duration + nsd.Duration.Duration, - } - s.DurationCount += 1 - return - } - } - s.Statistics = append(s.Statistics, &RollingUpgradeStatistics{ - StepName: nsd.StepName, - DurationSum: metav1.Duration{ - Duration: nsd.Duration.Duration, - }, - DurationCount: 1, - }) -} - -// Node turns onto step -func (s *RollingUpgradeStatus) NodeStep(InProcessingNodes map[string]*NodeInProcessing, - nodeSteps map[string][]NodeStepDuration, groupName, nodeName string, stepName RollingUpgradeStep, mutex *sync.Mutex) { - - var inProcessingNode *NodeInProcessing - if n, ok := InProcessingNodes[nodeName]; !ok { - inProcessingNode = &NodeInProcessing{ - NodeName: nodeName, - StepName: stepName, - UpgradeStartTime: metav1.Now(), - StepStartTime: metav1.Now(), - } - InProcessingNodes[nodeName] = inProcessingNode - } else { - inProcessingNode = n - } +func (r *RollingUpgrade) DrainTimeout() *int { + return r.Spec.Strategy.DrainTimeout +} - inProcessingNode.StepEndTime = metav1.Now() - var duration = inProcessingNode.StepEndTime.Sub(inProcessingNode.StepStartTime.Time) - if stepName == NodeRotationCompleted { - //Add overall and remove the node from in-processing map - var total = inProcessingNode.StepEndTime.Sub(inProcessingNode.UpgradeStartTime.Time) - duration1 := s.ToStepDuration(groupName, nodeName, inProcessingNode.StepName, duration) - duration2 := s.ToStepDuration(groupName, nodeName, NodeRotationTotal, total) - s.addNodeStepDuration(nodeSteps, nodeName, duration1, mutex) - s.addNodeStepDuration(nodeSteps, nodeName, duration2, mutex) - } else if inProcessingNode.StepName != stepName { //Still same step - var oldOrder = NodeRotationStepOrders[inProcessingNode.StepName] - var newOrder = NodeRotationStepOrders[stepName] - if newOrder > oldOrder { //Make sure the steps running in order - stepDuration := s.ToStepDuration(groupName, nodeName, inProcessingNode.StepName, duration) - inProcessingNode.StepStartTime = metav1.Now() - inProcessingNode.StepName = stepName - s.addNodeStepDuration(nodeSteps, nodeName, stepDuration, mutex) - } - } +func (r *RollingUpgrade) PostTerminateScript() string { + return r.Spec.PostTerminate.Script } -func (s *RollingUpgradeStatus) addNodeStepDuration(steps map[string][]NodeStepDuration, nodeName string, nsd NodeStepDuration, mutex *sync.Mutex) { - mutex.Lock() - if stepDuration, ok := steps[nodeName]; !ok { - steps[nodeName] = []NodeStepDuration{ - nsd, - } - } else { - stepDuration = append(stepDuration, nsd) - steps[nodeName] = stepDuration - } - mutex.Unlock() -} - -// Add one step duration -func (s *RollingUpgradeStatus) ToStepDuration(groupName, nodeName string, stepName RollingUpgradeStep, duration time.Duration) NodeStepDuration { - //Add to system level statistics - common.AddStepDuration(groupName, string(stepName), duration) - return NodeStepDuration{ - GroupName: groupName, - NodeName: 
nodeName, - StepName: stepName, - Duration: metav1.Duration{ - Duration: duration, - }, - } +func (r *RollingUpgrade) PostWaitScript() string { + return r.Spec.PostDrain.PostWaitScript } -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status -// +kubebuilder:resource:path=rollingupgrades,scope=Namespaced,shortName=ru -// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.currentStatus",description="current status of the rollingupgarde" -// +kubebuilder:printcolumn:name="TotalNodes",type="string",JSONPath=".status.totalNodes",description="total nodes involved in the rollingupgarde" -// +kubebuilder:printcolumn:name="NodesProcessed",type="string",JSONPath=".status.nodesProcessed",description="current number of nodes processed in the rollingupgarde" +func (r *RollingUpgrade) PreDrainScript() string { + return r.Spec.PreDrain.Script +} -// RollingUpgrade is the Schema for the rollingupgrades API -type RollingUpgrade struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` +func (r *RollingUpgrade) PostDrainScript() string { + return r.Spec.PostDrain.Script +} - Spec RollingUpgradeSpec `json:"spec,omitempty"` - Status RollingUpgradeStatus `json:"status,omitempty"` +func (r *RollingUpgrade) CurrentStatus() string { + return r.Status.CurrentStatus } -// +kubebuilder:object:root=true +func (r *RollingUpgrade) UpdateStrategyType() UpdateStrategyType { + return r.Spec.Strategy.Type +} -// RollingUpgradeList contains a list of RollingUpgrade -type RollingUpgradeList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []RollingUpgrade `json:"items"` +func (r *RollingUpgrade) MaxUnavailable() intstr.IntOrString { + return r.Spec.Strategy.MaxUnavailable } -func init() { - SchemeBuilder.Register(&RollingUpgrade{}, &RollingUpgradeList{}) +func (r *RollingUpgrade) LastNodeTerminationTime() *metav1.Time { + return r.Status.LastNodeTerminationTime } -// UpdateStrategyType indicates how the update has to be rolled out -// whether to roll the update AZ wise or all Azs at once -type UpdateStrategyType string +func (r *RollingUpgrade) SetLastNodeTerminationTime(t *metav1.Time) { + r.Status.LastNodeTerminationTime = t +} -type UpdateStrategyMode string +func (r *RollingUpgrade) LastNodeDrainTime() *metav1.Time { + return r.Status.LastNodeDrainTime +} -type UpgradeConditionType string +func (r *RollingUpgrade) SetLastNodeDrainTime(t *metav1.Time) { + r.Status.LastNodeDrainTime = t +} -const ( - // RandomUpdate strategy treats all the availability zones as a single unit and picks random nodes for update. - RandomUpdateStrategy UpdateStrategyType = "randomUpdate" +func (r *RollingUpgrade) NodeIntervalSeconds() int { + return r.Spec.NodeIntervalSeconds +} - // UniformAcrossAzUpdateStrategy Picks same number of nodes or same percentage of nodes from each AZ for update. 
- UniformAcrossAzUpdateStrategy UpdateStrategyType = "uniformAcrossAzUpdate" +func (r *RollingUpgrade) PostDrainDelaySeconds() int { + return r.Spec.PostDrainDelaySeconds +} - UpdateStrategyModeLazy UpdateStrategyMode = "lazy" - UpdateStrategyModeEager UpdateStrategyMode = "eager" +func (r *RollingUpgrade) SetCurrentStatus(status string) { + r.Status.CurrentStatus = status +} - // Other update strategies such as rolling update by AZ or rolling update with a pre-defined instance list - // can be implemented in future by adding more update strategy types +func (r *RollingUpgrade) SetStartTime(t string) { + r.Status.StartTime = t +} - UpgradeComplete UpgradeConditionType = "Complete" -) +func (r *RollingUpgrade) StartTime() string { + return r.Status.StartTime +} -func (c UpdateStrategyMode) String() string { - return string(c) +func (r *RollingUpgrade) SetEndTime(t string) { + r.Status.EndTime = t } -// NamespacedName returns namespaced name of the object. -func (r RollingUpgrade) NamespacedName() string { - return fmt.Sprintf("%s/%s", r.Namespace, r.Name) +func (r *RollingUpgrade) EndTime() string { + return r.Status.EndTime } -// UpdateStrategy holds the information needed to perform update based on different update strategies -type UpdateStrategy struct { - Type UpdateStrategyType `json:"type,omitempty"` - Mode UpdateStrategyMode `json:"mode,omitempty"` - // MaxUnavailable can be specified as number of nodes or the percent of total number of nodes - MaxUnavailable intstr.IntOrString `json:"maxUnavailable,omitempty"` - // Node will be terminated after drain timeout even if `kubectl drain` has not been completed - // and value has to be specified in seconds - DrainTimeout int `json:"drainTimeout"` +func (r *RollingUpgrade) SetTotalProcessingTime(t string) { + r.Status.TotalProcessingTime = t +} + +func (r *RollingUpgrade) SetTotalNodes(n int) { + r.Status.TotalNodes = n +} + +func (r *RollingUpgrade) SetNodesProcessed(n int) { + r.Status.NodesProcessed = n +} + +func (r *RollingUpgrade) SetCompletePercentage(n int) { + r.Status.CompletePercentage = fmt.Sprintf("%s%%", strconv.Itoa(n)) +} +func (r *RollingUpgrade) GetStatus() RollingUpgradeStatus { + return r.Status +} + +func (r *RollingUpgrade) IsForceRefresh() bool { + return r.Spec.ForceRefresh +} + +func (r *RollingUpgrade) IsIgnoreDrainFailures() *bool { + return r.Spec.IgnoreDrainFailures +} +func (r *RollingUpgrade) StrategyMode() UpdateStrategyMode { + return r.Spec.Strategy.Mode +} + +func (r *RollingUpgrade) Validate() (bool, error) { + strategy := r.Spec.Strategy + + // validating the Type value + if strategy.Type == "" { + r.Spec.Strategy.Type = RandomUpdateStrategy + } else if !common.ContainsEqualFold(AllowedStrategyType, string(strategy.Type)) { + err := fmt.Errorf("%s: Invalid value for startegy Type - %d", r.Name, strategy.MaxUnavailable.IntVal) + return false, err + } + + // validating the Mode value + if strategy.Mode == "" { + r.Spec.Strategy.Mode = UpdateStrategyModeLazy + } else if !common.ContainsEqualFold(AllowedStrategyMode, string(strategy.Mode)) { + err := fmt.Errorf("%s: Invalid value for startegy Mode - %d", r.Name, strategy.MaxUnavailable.IntVal) + return false, err + } + + // validating the maxUnavailable value + if strategy.MaxUnavailable.Type == intstr.Int && strategy.MaxUnavailable.IntVal == 0 { + r.Spec.Strategy.MaxUnavailable.IntVal = 1 + } else if strategy.MaxUnavailable.Type == intstr.Int && strategy.MaxUnavailable.IntVal < 0 { + err := fmt.Errorf("%s: Invalid value for startegy maxUnavailable - %d", 
r.Name, strategy.MaxUnavailable.IntVal)
+		return false, err
+	} else if strategy.MaxUnavailable.Type == intstr.String {
+		intValue, _ := strconv.Atoi(strings.Trim(strategy.MaxUnavailable.StrVal, "%"))
+		if intValue <= 0 || intValue > 100 {
+			err := fmt.Errorf("%s: Invalid value for strategy maxUnavailable - %s", r.Name, strategy.MaxUnavailable.StrVal)
+			return false, err
+		}
+	}
+
+	// validating the DrainTimeout value
+	if strategy.DrainTimeout != nil {
+		if *strategy.DrainTimeout == 0 {
+			*r.Spec.Strategy.DrainTimeout = -1
+		} else if *strategy.DrainTimeout < -1 {
+			err := fmt.Errorf("%s: Invalid value for strategy DrainTimeout - %d", r.Name, *strategy.DrainTimeout)
+			return false, err
+		}
+	}
+	return true, nil
 }
diff --git a/api/v1alpha1/rollingupgrade_types_test.go b/api/v1alpha1/rollingupgrade_types_test.go
deleted file mode 100644
index 8f0f5b6a..00000000
--- a/api/v1alpha1/rollingupgrade_types_test.go
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
-	"sync"
-	"testing"
-
-	. "github.com/onsi/ginkgo"
-	"github.com/onsi/gomega"
-	. "github.com/onsi/gomega"
-
-	"golang.org/x/net/context"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
-)
-
-// These tests are written in BDD-style using Ginkgo framework. Refer to
-// http://onsi.github.io/ginkgo to learn more.
-
-var _ = Describe("RollingUpgrade", func() {
-	var (
-		key              types.NamespacedName
-		created, fetched *RollingUpgrade
-	)
-
-	BeforeEach(func() {
-		// Add any setup steps that needs to be executed before each test
-	})
-
-	AfterEach(func() {
-		// Add any teardown steps that needs to be executed after each test
-	})
-
-	Context("NamespacedName", func() {
-		It("generates qualified name", func() {
-			ru := &RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Namespace: "namespace-foo", Name: "object-bar"}}
-			Expect(ru.NamespacedName()).To(Equal("namespace-foo/object-bar"))
-		})
-	})
-
-	// Add Tests for OpenAPI validation (or additonal CRD features) specified in
-	// your API definition.
-	// Avoid adding tests for vanilla CRUD operations because they would
-	// test Kubernetes API server, which isn't the goal here.
- Context("Create API", func() { - - It("should create an object successfully", func() { - - key = types.NamespacedName{ - Name: "foo", - Namespace: "default", - } - created = &RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }} - - By("creating an API obj") - Expect(k8sClient.Create(context.TODO(), created)).To(Succeed()) - - fetched = &RollingUpgrade{} - Expect(k8sClient.Get(context.TODO(), key, fetched)).To(Succeed()) - Expect(fetched).To(Equal(created)) - - By("deleting the created object") - Expect(k8sClient.Delete(context.TODO(), created)).To(Succeed()) - Expect(k8sClient.Get(context.TODO(), key, created)).ToNot(Succeed()) - }) - - }) - -}) - -// Test -func TestNodeTurnsOntoStep(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - r := &RollingUpgradeStatus{} - //A map to retain the steps for multiple nodes - nodeSteps := make(map[string][]NodeStepDuration) - inProcessingNodes := make(map[string]*NodeInProcessing) - mutex := &sync.Mutex{} - - r.NodeStep(inProcessingNodes, nodeSteps, "test-asg", "node-1", NodeRotationKickoff, mutex) - - g.Expect(inProcessingNodes).NotTo(gomega.BeNil()) - g.Expect(nodeSteps["node-1"]).To(gomega.BeNil()) - - r.NodeStep(inProcessingNodes, nodeSteps, "test-asg", "node-1", NodeRotationDesiredNodeReady, mutex) - - g.Expect(len(nodeSteps["node-1"])).To(gomega.Equal(1)) - g.Expect(nodeSteps["node-1"][0].StepName).To(gomega.Equal(NodeRotationKickoff)) - - //Retry desired_node_ready - r.NodeStep(inProcessingNodes, nodeSteps, "test-asg", "node-1", NodeRotationDesiredNodeReady, mutex) - g.Expect(len(nodeSteps["node-1"])).To(gomega.Equal(1)) - g.Expect(nodeSteps["node-1"][0].StepName).To(gomega.Equal(NodeRotationKickoff)) - - //Retry desired_node_ready again - r.NodeStep(inProcessingNodes, nodeSteps, "test-asg", "node-1", NodeRotationDesiredNodeReady, mutex) - g.Expect(len(nodeSteps["node-1"])).To(gomega.Equal(1)) - g.Expect(nodeSteps["node-1"][0].StepName).To(gomega.Equal(NodeRotationKickoff)) - - //Completed - r.NodeStep(inProcessingNodes, nodeSteps, "test-asg", "node-1", NodeRotationCompleted, mutex) - g.Expect(len(nodeSteps["node-1"])).To(gomega.Equal(3)) - g.Expect(nodeSteps["node-1"][1].StepName).To(gomega.Equal(NodeRotationDesiredNodeReady)) - g.Expect(nodeSteps["node-1"][2].StepName).To(gomega.Equal(NodeRotationTotal)) - - //Second node - r.NodeStep(inProcessingNodes, nodeSteps, "test-asg", "node-2", NodeRotationKickoff, mutex) - g.Expect(len(nodeSteps["node-1"])).To(gomega.Equal(3)) - - r.NodeStep(inProcessingNodes, nodeSteps, "test-asg", "node-2", NodeRotationDesiredNodeReady, mutex) - g.Expect(len(nodeSteps["node-1"])).To(gomega.Equal(3)) - - r.UpdateLastBatchNodes(inProcessingNodes) - g.Expect(len(r.LastBatchNodes)).To(gomega.Equal(2)) - - r.UpdateStatistics(nodeSteps) - g.Expect(r.Statistics).ToNot(gomega.BeEmpty()) - g.Expect(len(r.Statistics)).To(gomega.Equal(3)) - -} diff --git a/api/v1alpha1/suite_test.go b/api/v1alpha1/suite_test.go deleted file mode 100644 index 16948287..00000000 --- a/api/v1alpha1/suite_test.go +++ /dev/null @@ -1,74 +0,0 @@ -/* - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "path/filepath" - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" -) - -// These tests use Ginkgo (BDD-style Go testing framework). Refer to -// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. - -var cfg *rest.Config -var k8sClient client.Client -var testEnv *envtest.Environment - -func TestAPIs(t *testing.T) { - RegisterFailHandler(Fail) - - RunSpecsWithDefaultAndCustomReporters(t, - "v1alpha1 Suite", - []Reporter{envtest.NewlineReporter{}}) -} - -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter))) - - By("bootstrapping test environment") - testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, - } - - err := SchemeBuilder.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - cfg, err = testEnv.Start() - Expect(err).ToNot(HaveOccurred()) - Expect(cfg).ToNot(BeNil()) - - k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) - Expect(err).ToNot(HaveOccurred()) - Expect(k8sClient).ToNot(BeNil()) - - close(done) -}, 60) - -var _ = AfterSuite(func() { - By("tearing down the test environment") - err := testEnv.Stop() - Expect(err).ToNot(HaveOccurred()) -}) diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 9420634b..3057b24e 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -1,6 +1,7 @@ // +build !ignore_autogenerated /* +Copyright 2021 Intuit Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,12 +16,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -// autogenerated by controller-gen object, do not modify manually +// Code generated by controller-gen. DO NOT EDIT. 
package v1alpha1 import ( - "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -205,7 +205,12 @@ func (in *RollingUpgradeSpec) DeepCopyInto(out *RollingUpgradeSpec) { out.PreDrain = in.PreDrain out.PostDrain = in.PostDrain out.PostTerminate = in.PostTerminate - out.Strategy = in.Strategy + in.Strategy.DeepCopyInto(&out.Strategy) + if in.IgnoreDrainFailures != nil { + in, out := &in.IgnoreDrainFailures, &out.IgnoreDrainFailures + *out = new(bool) + **out = **in + } if in.ReadinessGates != nil { in, out := &in.ReadinessGates, &out.ReadinessGates *out = make([]NodeReadinessGate, len(*in)) @@ -251,13 +256,11 @@ func (in *RollingUpgradeStatus) DeepCopyInto(out *RollingUpgradeStatus) { } if in.LastNodeTerminationTime != nil { in, out := &in.LastNodeTerminationTime, &out.LastNodeTerminationTime - *out = new(v1.Time) - (*in).DeepCopyInto(*out) + *out = (*in).DeepCopy() } if in.LastNodeDrainTime != nil { in, out := &in.LastNodeDrainTime, &out.LastNodeDrainTime - *out = new(v1.Time) - (*in).DeepCopyInto(*out) + *out = (*in).DeepCopy() } if in.Statistics != nil { in, out := &in.Statistics, &out.Statistics @@ -275,6 +278,21 @@ func (in *RollingUpgradeStatus) DeepCopyInto(out *RollingUpgradeStatus) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.NodeInProcessing != nil { + in, out := &in.NodeInProcessing, &out.NodeInProcessing + *out = make(map[string]*NodeInProcessing, len(*in)) + for key, val := range *in { + var outVal *NodeInProcessing + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(NodeInProcessing) + (*in).DeepCopyInto(*out) + } + (*out)[key] = outVal + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpgradeStatus. @@ -291,6 +309,11 @@ func (in *RollingUpgradeStatus) DeepCopy() *RollingUpgradeStatus { func (in *UpdateStrategy) DeepCopyInto(out *UpdateStrategy) { *out = *in out.MaxUnavailable = in.MaxUnavailable + if in.DrainTimeout != nil { + in, out := &in.DrainTimeout, &out.DrainTimeout + *out = new(int) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateStrategy. diff --git a/config/certmanager/certificate.yaml b/config/certmanager/certificate.yaml index 9d6bad1e..52d86618 100644 --- a/config/certmanager/certificate.yaml +++ b/config/certmanager/certificate.yaml @@ -1,6 +1,7 @@ # The following manifests contain a self-signed issuer CR and a certificate CR. # More document can be found at https://docs.cert-manager.io -apiVersion: certmanager.k8s.io/v1alpha1 +# WARNING: Targets CertManager v1.0. Check https://cert-manager.io/docs/installation/upgrading/ for breaking changes. 
+apiVersion: cert-manager.io/v1 kind: Issuer metadata: name: selfsigned-issuer @@ -8,16 +9,16 @@ metadata: spec: selfSigned: {} --- -apiVersion: certmanager.k8s.io/v1alpha1 +apiVersion: cert-manager.io/v1 kind: Certificate metadata: name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml namespace: system spec: - # $(SERVICENAME) and $(NAMESPACE) will be substituted by kustomize - commonName: $(SERVICENAME).$(NAMESPACE).svc + # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize dnsNames: - - $(SERVICENAME).$(NAMESPACE).svc.cluster.local + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local issuerRef: kind: Issuer name: selfsigned-issuer diff --git a/config/certmanager/kustomization.yaml b/config/certmanager/kustomization.yaml index 8181bc3a..bebea5a5 100644 --- a/config/certmanager/kustomization.yaml +++ b/config/certmanager/kustomization.yaml @@ -1,26 +1,5 @@ resources: - certificate.yaml -# the following config is for teaching kustomize how to do var substitution -vars: -- name: NAMESPACE # namespace of the service and the certificate CR - objref: - kind: Service - version: v1 - name: webhook-service - fieldref: - fieldpath: metadata.namespace -- name: CERTIFICATENAME - objref: - kind: Certificate - group: certmanager.k8s.io - version: v1alpha1 - name: serving-cert # this name should match the one in certificate.yaml -- name: SERVICENAME - objref: - kind: Service - version: v1 - name: webhook-service - configurations: - kustomizeconfig.yaml diff --git a/config/certmanager/kustomizeconfig.yaml b/config/certmanager/kustomizeconfig.yaml index 49e0b1e7..90d7c313 100644 --- a/config/certmanager/kustomizeconfig.yaml +++ b/config/certmanager/kustomizeconfig.yaml @@ -1,16 +1,16 @@ # This configuration is for teaching kustomize how to update name ref and var substitution nameReference: - kind: Issuer - group: certmanager.k8s.io + group: cert-manager.io fieldSpecs: - kind: Certificate - group: certmanager.k8s.io + group: cert-manager.io path: spec/issuerRef/name varReference: - kind: Certificate - group: certmanager.k8s.io + group: cert-manager.io path: spec/commonName - kind: Certificate - group: certmanager.k8s.io + group: cert-manager.io path: spec/dnsNames diff --git a/config/crd/bases/upgrademgr.keikoproj.io_rollingupgrades.yaml b/config/crd/bases/upgrademgr.keikoproj.io_rollingupgrades.yaml index 358bc173..b7c9ef39 100644 --- a/config/crd/bases/upgrademgr.keikoproj.io_rollingupgrades.yaml +++ b/config/crd/bases/upgrademgr.keikoproj.io_rollingupgrades.yaml @@ -1,26 +1,13 @@ --- -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.2.4 + controller-gen.kubebuilder.io/version: v0.4.1 creationTimestamp: null name: rollingupgrades.upgrademgr.keikoproj.io spec: - additionalPrinterColumns: - - JSONPath: .status.currentStatus - description: current status of the rollingupgarde - name: Status - type: string - - JSONPath: .status.totalNodes - description: total nodes involved in the rollingupgarde - name: TotalNodes - type: string - - JSONPath: .status.nodesProcessed - description: current number of nodes processed in the rollingupgarde - name: NodesProcessed - type: string group: upgrademgr.keikoproj.io names: kind: RollingUpgrade @@ -30,162 +17,184 @@ spec: - ru singular: rollingupgrade scope: Namespaced - subresources: - status: {} - validation: - openAPIV3Schema: - 
description: RollingUpgrade is the Schema for the rollingupgrades API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: RollingUpgradeSpec defines the desired state of RollingUpgrade - properties: - asgName: - description: AsgName is AWS Autoscaling Group name to roll. - type: string - forceRefresh: - description: ForceRefresh enables draining and terminating the node - even if the launch config/template hasn't changed. - type: boolean - ignoreDrainFailures: - description: IgnoreDrainFailures allows ignoring node drain failures - and proceed with rolling upgrade. - type: boolean - nodeIntervalSeconds: - type: integer - postDrain: - description: PostDrainSpec contains the fields for actions taken after - draining the node. - properties: - postWaitScript: - type: string - script: - type: string - waitSeconds: - format: int64 - type: integer - type: object - postDrainDelaySeconds: - type: integer - postTerminate: - description: PostTerminateSpec contains the fields for actions taken - after terminating the node. - properties: - script: - type: string - type: object - preDrain: - description: PreDrainSpec contains the fields for actions taken before - draining the node. - properties: - script: - type: string - type: object - readinessGates: - description: ReadinessGates allow to specify label selectors that node - must match to be considered ready. - items: + versions: + - additionalPrinterColumns: + - description: current status of the rollingupgarde + jsonPath: .status.currentStatus + name: Status + type: string + - description: total nodes involved in the rollingupgarde + jsonPath: .status.totalNodes + name: TotalNodes + type: string + - description: current number of nodes processed in the rollingupgarde + jsonPath: .status.nodesProcessed + name: NodesProcessed + type: string + - description: percentage of completion for the rollingupgrade CR + jsonPath: .status.completePercentage + name: Complete + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: RollingUpgrade is the Schema for the rollingupgrades API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: RollingUpgradeSpec defines the desired state of RollingUpgrade + properties: + asgName: + type: string + forceRefresh: + type: boolean + ignoreDrainFailures: + type: boolean + nodeIntervalSeconds: + type: integer + postDrain: + description: PostDrainSpec contains the fields for actions taken after + draining the node. properties: - matchLabels: - additionalProperties: - type: string - type: object + postWaitScript: + type: string + script: + type: string + waitSeconds: + format: int64 + type: integer type: object - type: array - strategy: - description: UpdateStrategy holds the information needed to perform - update based on different update strategies - properties: - drainTimeout: - description: Node will be terminated after drain timeout even if - `kubectl drain` has not been completed and value has to be specified - in seconds - type: integer - maxUnavailable: - anyOf: - - type: integer - - type: string - description: MaxUnavailable can be specified as number of nodes - or the percent of total number of nodes - x-kubernetes-int-or-string: true - mode: - type: string - type: - description: UpdateStrategyType indicates how the update has to - be rolled out whether to roll the update AZ wise or all Azs at - once - type: string - required: - - drainTimeout - type: object - type: object - status: - description: RollingUpgradeStatus defines the observed state of RollingUpgrade - properties: - conditions: - items: - description: RollingUpgradeCondition describes the state of the RollingUpgrade + postDrainDelaySeconds: + type: integer + postTerminate: + description: PostTerminateSpec contains the fields for actions taken + after terminating the node. properties: - status: + script: type: string - type: + type: object + preDrain: + description: PreDrainSpec contains the fields for actions taken before + draining the node. 
+ properties: + script: type: string type: object - type: array - currentStatus: - type: string - endTime: - type: string - lastBatchNodes: - items: - type: string - type: array - lastDrainTime: - format: date-time - type: string - lastTerminationTime: - format: date-time - type: string - nodesProcessed: - type: integer - startTime: - type: string - statistics: - items: - description: RollingUpgrade Statistics, includes summary(sum/count) - from each step + readinessGates: + items: + properties: + matchLabels: + additionalProperties: + type: string + type: object + type: object + type: array + strategy: + description: UpdateStrategy holds the information needed to perform + update based on different update strategies properties: - durationCount: - format: int32 + drainTimeout: type: integer - durationSum: + maxUnavailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + mode: type: string - stepName: + type: type: string type: object - type: array - totalNodes: - type: integer - totalProcessingTime: - type: string - type: object - type: object - version: v1alpha1 - versions: - - name: v1alpha1 + type: object + status: + description: RollingUpgradeStatus defines the observed state of RollingUpgrade + properties: + completePercentage: + type: string + conditions: + items: + description: RollingUpgradeCondition describes the state of the + RollingUpgrade + properties: + status: + type: string + type: + type: string + type: object + type: array + currentStatus: + type: string + endTime: + type: string + lastBatchNodes: + description: For backward compatibility + items: + type: string + type: array + lastDrainTime: + format: date-time + type: string + lastTerminationTime: + format: date-time + type: string + nodeInProcessing: + additionalProperties: + description: Node In-processing + properties: + nodeName: + type: string + stepEndTime: + format: date-time + type: string + stepName: + type: string + stepStartTime: + format: date-time + type: string + upgradeStartTime: + format: date-time + type: string + type: object + type: object + nodesProcessed: + type: integer + startTime: + type: string + statistics: + items: + description: RollingUpgrade Statistics, includes summary(sum/count) + from each step + properties: + durationCount: + format: int32 + type: integer + durationSum: + type: string + stepName: + type: string + type: object + type: array + totalNodes: + type: integer + totalProcessingTime: + type: string + type: object + type: object served: true storage: true + subresources: + status: {} status: acceptedNames: kind: "" diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index b02f5d3f..7b0bdd48 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -3,16 +3,18 @@ # It should be run by config/default resources: - bases/upgrademgr.keikoproj.io_rollingupgrades.yaml -# +kubebuilder:scaffold:crdkustomizeresource +#+kubebuilder:scaffold:crdkustomizeresource -patches: -# [WEBHOOK] patches here are for enabling the conversion webhook for each CRD +patchesStrategicMerge: +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 
+# patches here are for enabling the conversion webhook for each CRD #- patches/webhook_in_rollingupgrades.yaml -# +kubebuilder:scaffold:crdkustomizewebhookpatch +#+kubebuilder:scaffold:crdkustomizewebhookpatch -# [CAINJECTION] patches here are for enabling the CA injection for each CRD +# [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. +# patches here are for enabling the CA injection for each CRD #- patches/cainjection_in_rollingupgrades.yaml -# +kubebuilder:scaffold:crdkustomizecainjectionpatch +#+kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. configurations: diff --git a/config/crd/kustomizeconfig.yaml b/config/crd/kustomizeconfig.yaml index 6f83d9a9..ec5c150a 100644 --- a/config/crd/kustomizeconfig.yaml +++ b/config/crd/kustomizeconfig.yaml @@ -4,13 +4,15 @@ nameReference: version: v1 fieldSpecs: - kind: CustomResourceDefinition + version: v1 group: apiextensions.k8s.io - path: spec/conversion/webhookClientConfig/service/name + path: spec/conversion/webhook/clientConfig/service/name namespace: - kind: CustomResourceDefinition + version: v1 group: apiextensions.k8s.io - path: spec/conversion/webhookClientConfig/service/namespace + path: spec/conversion/webhook/clientConfig/service/namespace create: false varReference: diff --git a/config/crd/patches/cainjection_in_rollingupgrades.yaml b/config/crd/patches/cainjection_in_rollingupgrades.yaml index 34e9b4c6..0aea76d9 100644 --- a/config/crd/patches/cainjection_in_rollingupgrades.yaml +++ b/config/crd/patches/cainjection_in_rollingupgrades.yaml @@ -1,8 +1,7 @@ # The following patch adds a directive for certmanager to inject CA into the CRD -# CRD conversion requires k8s 1.13 or later. -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME) + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) name: rollingupgrades.upgrademgr.keikoproj.io diff --git a/config/crd/patches/webhook_in_rollingupgrades.yaml b/config/crd/patches/webhook_in_rollingupgrades.yaml index 7b230fec..93193e9f 100644 --- a/config/crd/patches/webhook_in_rollingupgrades.yaml +++ b/config/crd/patches/webhook_in_rollingupgrades.yaml @@ -1,17 +1,14 @@ -# The following patch enables conversion webhook for CRD -# CRD conversion requires k8s 1.13 or later. 
-apiVersion: apiextensions.k8s.io/v1beta1 +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: name: rollingupgrades.upgrademgr.keikoproj.io spec: conversion: strategy: Webhook - webhookClientConfig: - # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, - # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) - caBundle: Cg== - service: - namespace: system - name: webhook-service - path: /convert + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index 6fd0f327..2f84da22 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -16,28 +16,60 @@ bases: - ../crd - ../rbac - ../manager -# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml #- ../webhook -# [CERTMANAGER] To enable cert-manager, uncomment next line. 'WEBHOOK' components are required. +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. #- ../certmanager +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +#- ../prometheus -patches: +patchesStrategicMerge: - manager_image_patch.yaml - # Protect the /metrics endpoint by putting it behind auth. - # Only one of manager_auth_proxy_patch.yaml and - # manager_prometheus_metrics_patch.yaml should be enabled. +# Protect the /metrics endpoint by putting it behind auth. +# If you want your controller-manager to expose the /metrics +# endpoint w/o any authn/z, please comment the following line. - manager_auth_proxy_patch.yaml - # If you want your controller-manager to expose the /metrics - # endpoint w/o any authn/z, uncomment the following line and - # comment manager_auth_proxy_patch.yaml. - # Only one of manager_auth_proxy_patch.yaml and - # manager_prometheus_metrics_patch.yaml should be enabled. -#- manager_prometheus_metrics_patch.yaml - -# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml + +# Mount the controller config file for loading manager configurations +# through a ComponentConfig type +#- manager_config_patch.yaml + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml #- manager_webhook_patch.yaml -# [CAINJECTION] Uncomment next line to enable the CA injection in the admission webhooks. -# Uncomment 'CAINJECTION' in crd/kustomization.yaml to enable the CA injection in the admission webhooks. +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. +# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. # 'CERTMANAGER' needs to be enabled to use ca injection #- webhookcainjection_patch.yaml + +# the following config is for teaching kustomize how to do var substitution +vars: +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 
+#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR +# objref: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # this name should match the one in certificate.yaml +# fieldref: +# fieldpath: metadata.namespace +#- name: CERTIFICATE_NAME +# objref: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # this name should match the one in certificate.yaml +#- name: SERVICE_NAMESPACE # namespace of the service +# objref: +# kind: Service +# version: v1 +# name: webhook-service +# fieldref: +# fieldpath: metadata.namespace +#- name: SERVICE_NAME +# objref: +# kind: Service +# version: v1 +# name: webhook-service diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml index d3994fb9..24d0f5ae 100644 --- a/config/default/manager_auth_proxy_patch.yaml +++ b/config/default/manager_auth_proxy_patch.yaml @@ -1,5 +1,5 @@ -# This patch inject a sidecar container which is a HTTP proxy for the controller manager, -# it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. +# This patch inject a sidecar container which is a HTTP proxy for the +# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. apiVersion: apps/v1 kind: Deployment metadata: @@ -10,7 +10,7 @@ spec: spec: containers: - name: kube-rbac-proxy - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.0 + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.5.0 args: - "--secure-listen-address=0.0.0.0:8443" - "--upstream=http://127.0.0.1:8080/" @@ -21,4 +21,4 @@ spec: name: https - name: manager args: - - "--metrics-addr=127.0.0.1:8080" + - "--max-parallel=10" diff --git a/config/default/manager_config_patch.yaml b/config/default/manager_config_patch.yaml new file mode 100644 index 00000000..6c400155 --- /dev/null +++ b/config/default/manager_config_patch.yaml @@ -0,0 +1,20 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + args: + - "--config=controller_manager_config.yaml" + volumeMounts: + - name: manager-config + mountPath: /controller_manager_config.yaml + subPath: controller_manager_config.yaml + volumes: + - name: manager-config + configMap: + name: manager-config diff --git a/config/default/manager_image_patch.yaml b/config/default/manager_image_patch.yaml index b54f47b9..a77c96af 100644 --- a/config/default/manager_image_patch.yaml +++ b/config/default/manager_image_patch.yaml @@ -8,5 +8,5 @@ spec: spec: containers: # Change the value of image field below to your controller image URL - - image: keikoproj/rolling-upgrade-controller:0.21 + - image: keikoproj/rolling-upgrade-controller:1.0.4 name: manager diff --git a/config/default/manager_prometheus_metrics_patch.yaml b/config/default/manager_prometheus_metrics_patch.yaml deleted file mode 100644 index 0b96c681..00000000 --- a/config/default/manager_prometheus_metrics_patch.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# This patch enables Prometheus scraping for the manager pod. 
-apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - metadata: - annotations: - prometheus.io/scrape: 'true' - spec: - containers: - # Expose the prometheus metrics on default port - - name: manager - ports: - - containerPort: 8080 - name: metrics - protocol: TCP diff --git a/config/default/manager_webhook_patch.yaml b/config/default/manager_webhook_patch.yaml deleted file mode 100644 index f2f7157b..00000000 --- a/config/default/manager_webhook_patch.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - containers: - - name: manager - ports: - - containerPort: 443 - name: webhook-server - protocol: TCP - volumeMounts: - - mountPath: /tmp/k8s-webhook-server/serving-certs - name: cert - readOnly: true - volumes: - - name: cert - secret: - defaultMode: 420 - secretName: webhook-server-cert diff --git a/config/default/webhookcainjection_patch.yaml b/config/default/webhookcainjection_patch.yaml deleted file mode 100644 index f6d71cb7..00000000 --- a/config/default/webhookcainjection_patch.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# This patch add annotation to admission webhook config and -# the variables $(NAMESPACE) and $(CERTIFICATENAME) will be substituted by kustomize. -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: MutatingWebhookConfiguration -metadata: - name: mutating-webhook-configuration - annotations: - certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME) ---- -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - name: validating-webhook-configuration - annotations: - certmanager.k8s.io/inject-ca-from: $(NAMESPACE)/$(CERTIFICATENAME) diff --git a/config/manager/controller_manager_config.yaml b/config/manager/controller_manager_config.yaml new file mode 100644 index 00000000..79b2b804 --- /dev/null +++ b/config/manager/controller_manager_config.yaml @@ -0,0 +1,11 @@ +apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 +kind: ControllerManagerConfig +health: + healthProbeBindAddress: :8081 +metrics: + bindAddress: 127.0.0.1:8080 +webhook: + port: 9443 +leaderElection: + leaderElect: true + resourceName: d6edb06e.keikoproj.io diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 5c5f0b84..2bcd3eea 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -1,2 +1,10 @@ resources: - manager.yaml + +generatorOptions: + disableNameSuffixHash: true + +configMapGenerator: +- name: manager-config + files: + - controller_manager_config.yaml diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index b6c85a52..55627a42 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -22,18 +22,34 @@ spec: labels: control-plane: controller-manager spec: + securityContext: + runAsUser: 65532 containers: - command: - /manager args: - - --enable-leader-election + - --leader-elect image: controller:latest name: manager + securityContext: + allowPrivilegeEscalation: false + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 resources: limits: cpu: 100m - memory: 30Mi + memory: 300Mi requests: cpu: 100m - memory: 20Mi + memory: 200Mi terminationGracePeriodSeconds: 10 diff --git a/config/prometheus/kustomization.yaml 
b/config/prometheus/kustomization.yaml new file mode 100644 index 00000000..ed137168 --- /dev/null +++ b/config/prometheus/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- monitor.yaml diff --git a/config/prometheus/monitor.yaml b/config/prometheus/monitor.yaml new file mode 100644 index 00000000..9b8047b7 --- /dev/null +++ b/config/prometheus/monitor.yaml @@ -0,0 +1,16 @@ + +# Prometheus Monitor Service (Metrics) +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + control-plane: controller-manager + name: controller-manager-metrics-monitor + namespace: system +spec: + endpoints: + - path: /metrics + port: https + selector: + matchLabels: + control-plane: controller-manager diff --git a/config/rbac/auth_proxy_client_clusterrole.yaml b/config/rbac/auth_proxy_client_clusterrole.yaml new file mode 100644 index 00000000..bd4af137 --- /dev/null +++ b/config/rbac/auth_proxy_client_clusterrole.yaml @@ -0,0 +1,7 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-reader +rules: +- nonResourceURLs: ["/metrics"] + verbs: ["get"] diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml index d61e5469..6cf656be 100644 --- a/config/rbac/auth_proxy_service.yaml +++ b/config/rbac/auth_proxy_service.yaml @@ -1,10 +1,6 @@ apiVersion: v1 kind: Service metadata: - annotations: - prometheus.io/port: "8443" - prometheus.io/scheme: https - prometheus.io/scrape: "true" labels: control-plane: controller-manager name: controller-manager-metrics-service diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index 817f1fe6..66c28338 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -3,9 +3,10 @@ resources: - role_binding.yaml - leader_election_role.yaml - leader_election_role_binding.yaml -# Comment the following 3 lines if you want to disable +# Comment the following 4 lines if you want to disable # the auth proxy (https://github.com/brancz/kube-rbac-proxy) # which protects your /metrics endpoint. - auth_proxy_service.yaml - auth_proxy_role.yaml - auth_proxy_role_binding.yaml +- auth_proxy_client_clusterrole.yaml diff --git a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml index 85093a8c..6334cc51 100644 --- a/config/rbac/leader_election_role.yaml +++ b/config/rbac/leader_election_role.yaml @@ -6,8 +6,10 @@ metadata: rules: - apiGroups: - "" + - coordination.k8s.io resources: - configmaps + - leases verbs: - get - list @@ -19,8 +21,7 @@ rules: - apiGroups: - "" resources: - - configmaps/status + - events verbs: - - get - - update + - create - patch diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 5c4ea57f..b01753b3 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -35,6 +35,7 @@ rules: - get - list - patch + - watch - apiGroups: - "" resources: diff --git a/config/rbac/rollingupgrade_editor_role.yaml b/config/rbac/rollingupgrade_editor_role.yaml new file mode 100644 index 00000000..00ffd93e --- /dev/null +++ b/config/rbac/rollingupgrade_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit rollingupgrades. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: rollingupgrade-editor-role +rules: +- apiGroups: + - upgrademgr.keikoproj.io + resources: + - rollingupgrades + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - upgrademgr.keikoproj.io + resources: + - rollingupgrades/status + verbs: + - get diff --git a/config/rbac/rollingupgrade_viewer_role.yaml b/config/rbac/rollingupgrade_viewer_role.yaml new file mode 100644 index 00000000..2eb5bcf4 --- /dev/null +++ b/config/rbac/rollingupgrade_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view rollingupgrades. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: rollingupgrade-viewer-role +rules: +- apiGroups: + - upgrademgr.keikoproj.io + resources: + - rollingupgrades + verbs: + - get + - list + - watch +- apiGroups: + - upgrademgr.keikoproj.io + resources: + - rollingupgrades/status + verbs: + - get diff --git a/config/samples/upgrademgr_v1alpha1_rollingupgrade.yaml b/config/samples/upgrademgr_v1alpha1_rollingupgrade.yaml index e60a88b5..bac61e87 100644 --- a/config/samples/upgrademgr_v1alpha1_rollingupgrade.yaml +++ b/config/samples/upgrademgr_v1alpha1_rollingupgrade.yaml @@ -1,7 +1,7 @@ apiVersion: upgrademgr.keikoproj.io/v1alpha1 kind: RollingUpgrade metadata: - generateName: rollingupgrade-sample- + name: rollingupgrade-sample spec: # Add fields here foo: bar diff --git a/config/webhook/kustomization.yaml b/config/webhook/kustomization.yaml deleted file mode 100644 index 9cf26134..00000000 --- a/config/webhook/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ -resources: -- manifests.yaml -- service.yaml - -configurations: -- kustomizeconfig.yaml diff --git a/config/webhook/kustomizeconfig.yaml b/config/webhook/kustomizeconfig.yaml deleted file mode 100644 index 25e21e3c..00000000 --- a/config/webhook/kustomizeconfig.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# the following config is for teaching kustomize where to look at when substituting vars. -# It requires kustomize v2.1.0 or newer to work properly. -nameReference: -- kind: Service - version: v1 - fieldSpecs: - - kind: MutatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/name - - kind: ValidatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/name - -namespace: -- kind: MutatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/namespace - create: true -- kind: ValidatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/namespace - create: true - -varReference: -- path: metadata/annotations diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml deleted file mode 100644 index e69de29b..00000000 diff --git a/config/webhook/service.yaml b/config/webhook/service.yaml deleted file mode 100644 index b4861025..00000000 --- a/config/webhook/service.yaml +++ /dev/null @@ -1,12 +0,0 @@ - -apiVersion: v1 -kind: Service -metadata: - name: webhook-service - namespace: system -spec: - ports: - - port: 443 - targetPort: 443 - selector: - control-plane: controller-manager diff --git a/controllers/cloud.go b/controllers/cloud.go new file mode 100644 index 00000000..dc19f72d --- /dev/null +++ b/controllers/cloud.go @@ -0,0 +1,71 @@ +/* +Copyright 2021 Intuit Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/go-logr/logr" + + corev1 "k8s.io/api/core/v1" + + "github.com/pkg/errors" +) + +var ( + instanceStateTagKey = "upgrademgr.keikoproj.io/state" + inProgressTagValue = "in-progress" +) + +type DiscoveredState struct { + *RollingUpgradeAuthenticator + logr.Logger + ClusterNodes []*corev1.Node + LaunchTemplates []*ec2.LaunchTemplate + ScalingGroups []*autoscaling.Group + InProgressInstances []string +} + +func NewDiscoveredState(auth *RollingUpgradeAuthenticator, logger logr.Logger) *DiscoveredState { + return &DiscoveredState{ + RollingUpgradeAuthenticator: auth, + Logger: logger, + } +} + +func (d *DiscoveredState) Discover() error { + + launchTemplates, err := d.AmazonClientSet.DescribeLaunchTemplates() + if err != nil { + return errors.Wrap(err, "failed to discover launch templates") + } + d.LaunchTemplates = launchTemplates + + scalingGroups, err := d.AmazonClientSet.DescribeScalingGroups() + if err != nil { + return errors.Wrap(err, "failed to discover scaling groups") + } + d.ScalingGroups = scalingGroups + + inProgressInstances, err := d.AmazonClientSet.DescribeTaggedInstanceIDs(instanceStateTagKey, inProgressTagValue) + if err != nil { + return errors.Wrap(err, "failed to discover ec2 instances") + } + d.InProgressInstances = inProgressInstances + + return nil +} diff --git a/pkg/log/log.go b/controllers/common/log/log.go similarity index 89% rename from pkg/log/log.go rename to controllers/common/log/log.go index 6f42c02a..4d6f588b 100644 --- a/pkg/log/log.go +++ b/controllers/common/log/log.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 Intuit Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package log import ( diff --git a/pkg/log/retry_logger.go b/controllers/common/log/retry.go similarity index 61% rename from pkg/log/retry_logger.go rename to controllers/common/log/retry.go index ab7e4cc3..9cdaaae9 100644 --- a/pkg/log/retry_logger.go +++ b/controllers/common/log/retry.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 Intuit Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package log import ( diff --git a/controllers/common/metrics.go b/controllers/common/metrics.go index 1c8949d9..f1909cc0 100644 --- a/controllers/common/metrics.go +++ b/controllers/common/metrics.go @@ -6,7 +6,7 @@ import ( "sync" "time" - "github.com/keikoproj/upgrade-manager/pkg/log" + "github.com/keikoproj/upgrade-manager/controllers/common/log" "github.com/prometheus/client_golang/prometheus" "sigs.k8s.io/controller-runtime/pkg/metrics" ) @@ -61,7 +61,7 @@ func InitMetrics() { metrics.Registry.MustRegister(CRStatus) } -//Observe total processing time +// observe total processing time func TotalProcessingTime(groupName string, duration time.Duration) { var summary prometheus.Summary if s, ok := totalProcessingTime[groupName]; !ok { diff --git a/controllers/common/metrics_test.go b/controllers/common/metrics_test.go index 8bc917b2..8d96626b 100644 --- a/controllers/common/metrics_test.go +++ b/controllers/common/metrics_test.go @@ -47,27 +47,3 @@ func TestCRStatusCompleted(t *testing.T) { g.Expect(err).To(gomega.BeNil()) g.Expect(gauage).ToNot(gomega.BeNil()) } - -func TestTotalProcessingTime(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - g.Expect(totalProcessingTime["test-asg"]).To(gomega.BeNil()) - TotalProcessingTime("test-asg", 1) - g.Expect(totalProcessingTime["test-asg"]).NotTo(gomega.BeNil()) -} - -func TestSetTotalNodesMetric(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - g.Expect(totalNodesMetrics["test-asg"]).To(gomega.BeNil()) - SetTotalNodesMetric("test-asg", 1) - g.Expect(totalNodesMetrics["test-asg"]).NotTo(gomega.BeNil()) -} - -func TestSetNodesProcessedMetric(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - g.Expect(nodesProcessedMetrics["test-asg"]).To(gomega.BeNil()) - SetNodesProcessedMetric("test-asg", 1) - g.Expect(nodesProcessedMetrics["test-asg"]).NotTo(gomega.BeNil()) -} diff --git a/controllers/common/utils.go b/controllers/common/utils.go new file mode 100644 index 00000000..a8c6b3e6 --- /dev/null +++ b/controllers/common/utils.go @@ -0,0 +1,54 @@ +/* +Copyright 2021 Intuit Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package common + +import ( + "strings" +) + +// ContainsEqualFold returns true if a given slice 'slice' contains string 's' under unicode case-folding +func ContainsEqualFold(slice []string, s string) bool { + for _, item := range slice { + if strings.EqualFold(item, s) { + return true + } + } + return false +} + +func IntMax(a, b int) int { + if a > b { + return a + } + return b +} + +func IntMin(a, b int) int { + if a < b { + return a + } + return b +} + +func GetChunks(items []string, chunkSize int) [][]string { + var chunks [][]string + for i := 0; i < len(items); i += chunkSize { + end := IntMin(i+chunkSize, len(items)) + chunks = append(chunks, items[i:end]) + } + return chunks +} diff --git a/controllers/events.go b/controllers/events.go deleted file mode 100644 index 9265b8cb..00000000 --- a/controllers/events.go +++ /dev/null @@ -1,75 +0,0 @@ -package controllers - -import ( - "encoding/json" - "fmt" - "math/rand" - "time" - - upgrademgrv1alpha1 "github.com/keikoproj/upgrade-manager/api/v1alpha1" - "github.com/keikoproj/upgrade-manager/pkg/log" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// EventReason defines the reason of an event -type EventReason string - -// EventLevel defines the level of an event -type EventLevel string - -const ( - // EventLevelNormal is the level of a normal event - EventLevelNormal = "Normal" - // EventLevelWarning is the level of a warning event - EventLevelWarning = "Warning" - // EventReasonRUStarted Rolling Upgrade Started - EventReasonRUStarted EventReason = "RollingUpgradeStarted" - // EventReasonRUInstanceStarted Rolling Upgrade for Instance has started - EventReasonRUInstanceStarted EventReason = "RollingUpgradeInstanceStarted" - // EventReasonRUInstanceFinished Rolling Upgrade for Instance has finished - EventReasonRUInstanceFinished EventReason = "RollingUpgradeInstanceFinished" - // EventReasonRUFinished Rolling Upgrade Finished - EventReasonRUFinished EventReason = "RollingUpgradeFinished" -) - -func (r *RollingUpgradeReconciler) createK8sV1Event(objMeta *upgrademgrv1alpha1.RollingUpgrade, reason EventReason, level string, msgFields map[string]string) *v1.Event { - // Marshal as JSON - // I think it is very tough to trigger this error since json.Marshal function can return two types of errors - // UnsupportedTypeError or UnsupportedValueError. Since our type is very rigid, these errors won't be triggered. - b, _ := json.Marshal(msgFields) - msgPayload := string(b) - t := metav1.Time{Time: time.Now()} - event := &v1.Event{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%v-%v.%v", objMeta.Name, time.Now().Unix(), rand.Int()), - }, - Source: v1.EventSource{ - // TODO(vigith): get it from GVK? 
- Component: "upgrade-manager", - }, - InvolvedObject: v1.ObjectReference{ - Kind: "RollingUpgrade", - Name: objMeta.Name, - Namespace: objMeta.Namespace, - ResourceVersion: objMeta.ResourceVersion, - APIVersion: upgrademgrv1alpha1.GroupVersion.Version, - UID: objMeta.UID, - }, - Reason: string(reason), - Message: msgPayload, - Type: level, - Count: 1, - FirstTimestamp: t, - LastTimestamp: t, - } - - log.Debugf("Publishing event: %v", event) - _event, err := r.generatedClient.CoreV1().Events(objMeta.Namespace).Create(event) - if err != nil { - log.Errorf("Create Events Failed %v, %v", event, err) - } - - return _event -} diff --git a/controllers/events_test.go b/controllers/events_test.go deleted file mode 100644 index 24126029..00000000 --- a/controllers/events_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package controllers - -import ( - "github.com/keikoproj/aws-sdk-go-cache/cache" - upgrademgrv1alpha1 "github.com/keikoproj/upgrade-manager/api/v1alpha1" - "github.com/onsi/gomega" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - log2 "sigs.k8s.io/controller-runtime/pkg/log" - "testing" - "time" -) - -func Test_createK8sV1Event(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: log2.NullLogger{}, - ASGClient: MockAutoscalingGroup{}, - EC2Client: MockEC2{}, - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - ClusterState: NewClusterState(), - CacheConfig: cache.NewConfig(0*time.Second, 0, 0), - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - } - - event := rcRollingUpgrade.createK8sV1Event(ruObj, EventReasonRUStarted, EventLevelNormal, map[string]string{}) - g.Expect(EventReason(event.Reason)).To(gomega.Equal(EventReasonRUStarted)) - - g.Expect(err).To(gomega.BeNil()) -} diff --git a/controllers/helpers.go b/controllers/helpers.go deleted file mode 100644 index a933c373..00000000 --- a/controllers/helpers.go +++ /dev/null @@ -1,187 +0,0 @@ -package controllers - -import ( - "fmt" - "strconv" - "strings" - - "k8s.io/apimachinery/pkg/util/intstr" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/ec2/ec2iface" - corev1 "k8s.io/api/core/v1" - - "log" - - upgrademgrv1alpha1 "github.com/keikoproj/upgrade-manager/api/v1alpha1" -) - -// getMaxUnavailable calculates and returns the maximum unavailable nodes -// takes an update strategy and total number of nodes as input -func getMaxUnavailable(strategy upgrademgrv1alpha1.UpdateStrategy, totalNodes int) int { - var maxUnavailable int - if strategy.MaxUnavailable.Type == intstr.String { - if strings.Contains(strategy.MaxUnavailable.StrVal, "%") { - maxUnavailable, _ = intstr.GetValueFromIntOrPercent(&strategy.MaxUnavailable, totalNodes, false) - } else { - maxUnavailable, _ = strconv.Atoi(strategy.MaxUnavailable.StrVal) - } - } else { - maxUnavailable = strategy.MaxUnavailable.IntValue() - } - - // setting maxUnavailable to total number of nodes when maxUnavailable is greater than total node count - if totalNodes < maxUnavailable { - log.Printf("Reducing maxUnavailable count from %d to %d as total nodes count is %d", - maxUnavailable, totalNodes, totalNodes) - maxUnavailable = totalNodes - } - // maxUnavailable has to be at 
least 1 when there are nodes in the ASG - if totalNodes > 0 && maxUnavailable < 1 { - maxUnavailable = 1 - } - return maxUnavailable -} - -func isNodeReady(node corev1.Node) bool { - for _, condition := range node.Status.Conditions { - if condition.Type == corev1.NodeReady && condition.Status == corev1.ConditionTrue { - return true - } - } - return false -} - -func isNodePassingReadinessGates(node corev1.Node, requiredReadinessGates []upgrademgrv1alpha1.NodeReadinessGate) bool { - if len(requiredReadinessGates) == 0 { - return true - } - for _, gate := range requiredReadinessGates { - for key, value := range gate.MatchLabels { - if node.Labels[key] != value { - return false - } - } - } - return true -} - -func getInServiceCount(instances []*autoscaling.Instance) int64 { - var count int64 - for _, instance := range instances { - if aws.StringValue(instance.LifecycleState) == autoscaling.LifecycleStateInService { - count++ - } - } - return count -} - -func getInServiceIds(instances []*autoscaling.Instance) []string { - list := []string{} - for _, instance := range instances { - if aws.StringValue(instance.LifecycleState) == autoscaling.LifecycleStateInService { - list = append(list, aws.StringValue(instance.InstanceId)) - } - } - return list -} - -func getInstanceStateInASG(group *autoscaling.Group, instanceID string) (string, error) { - for _, instance := range group.Instances { - if aws.StringValue(instance.InstanceId) == instanceID { - return aws.StringValue(instance.LifecycleState), nil - } - } - return "", fmt.Errorf("could not get instance group state, instance %s not found", instanceID) -} - -func isInServiceLifecycleState(state string) bool { - return state == autoscaling.LifecycleStateInService -} - -func tagEC2instance(instanceID, tagKey, tagValue string, client ec2iface.EC2API) error { - input := &ec2.CreateTagsInput{ - Resources: aws.StringSlice([]string{instanceID}), - Tags: []*ec2.Tag{ - { - Key: aws.String(tagKey), - Value: aws.String(tagValue), - }, - }, - } - _, err := client.CreateTags(input) - return err -} - -func getTaggedInstances(tagKey, tagValue string, client ec2iface.EC2API) ([]string, error) { - instances := []string{} - key := fmt.Sprintf("tag:%v", tagKey) - input := &ec2.DescribeInstancesInput{ - Filters: []*ec2.Filter{ - { - Name: aws.String(key), - Values: aws.StringSlice([]string{tagValue}), - }, - }, - } - - err := client.DescribeInstancesPages(input, func(page *ec2.DescribeInstancesOutput, lastPage bool) bool { - for _, res := range page.Reservations { - for _, instance := range res.Instances { - instances = append(instances, aws.StringValue(instance.InstanceId)) - } - } - return page.NextToken != nil - }) - return instances, err -} - -func contains(s []string, e string) bool { - for _, a := range s { - if a == e { - return true - } - } - return false -} - -// getNextAvailableInstances checks the cluster state store for the instance state -// and returns the next set of instances available for update -func getNextAvailableInstances( - asgName string, - numberOfInstances int, - instances []*autoscaling.Instance, - state ClusterState) []*autoscaling.Instance { - return getNextSetOfAvailableInstancesInAz(asgName, "", numberOfInstances, instances, state) -} - -// getNextSetOfAvailableInstancesInAz checks the cluster state store for the instance state -// and returns the next set of instances available for update in the given AX -func getNextSetOfAvailableInstancesInAz( - asgName string, - azName string, - numberOfInstances int, - instances 
[]*autoscaling.Instance, - state ClusterState, -) []*autoscaling.Instance { - - var instancesForUpdate []*autoscaling.Instance - for instancesFound := 0; instancesFound < numberOfInstances; { - instanceId := state.getNextAvailableInstanceIdInAz(asgName, azName) - if len(instanceId) == 0 { - // All instances are updated, no more instance to update in this AZ - break - } - - // check if the instance picked is part of ASG - for _, instance := range instances { - if *instance.InstanceId == instanceId { - instancesForUpdate = append(instancesForUpdate, instance) - instancesFound++ - } - } - } - return instancesForUpdate -} diff --git a/controllers/helpers_test.go b/controllers/helpers_test.go index a7b76e2b..cb3dae4e 100644 --- a/controllers/helpers_test.go +++ b/controllers/helpers_test.go @@ -1,338 +1,402 @@ package controllers import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" + "strings" + "sync" + "testing" + "time" - "github.com/aws/aws-sdk-go/aws" + "k8s.io/client-go/kubernetes/fake" + ctrl "sigs.k8s.io/controller-runtime" - "github.com/aws/aws-sdk-go/service/autoscaling" - upgrademgrv1alpha1 "github.com/keikoproj/upgrade-manager/api/v1alpha1" - "github.com/onsi/gomega" + "github.com/keikoproj/upgrade-manager/api/v1alpha1" + awsprovider "github.com/keikoproj/upgrade-manager/controllers/providers/aws" + kubeprovider "github.com/keikoproj/upgrade-manager/controllers/providers/kubernetes" corev1 "k8s.io/api/core/v1" - "testing" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + //AWS + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2/ec2iface" ) -func TestGetMaxUnavailableWithPercentageValue(t *testing.T) { - g := gomega.NewGomegaWithT(t) +// K8s +func createRollingUpgradeReconciler(t *testing.T) *RollingUpgradeReconciler { + // amazon client + amazonClient := createAmazonClient(t) - strategy := upgrademgrv1alpha1.UpdateStrategy{ - MaxUnavailable: intstr.Parse("75%"), + // k8s client (fake client) + kubeClient := &kubeprovider.KubernetesClientSet{ + Kubernetes: fake.NewSimpleClientset(createNodeList()), } - g.Expect(getMaxUnavailable(strategy, 200)).To(gomega.Equal(150)) -} -func TestIsNodeReady(t *testing.T) { - g := gomega.NewGomegaWithT(t) - tt := map[corev1.NodeCondition]bool{ - corev1.NodeCondition{Type: corev1.NodeReady, Status: corev1.ConditionTrue}: true, - corev1.NodeCondition{Type: corev1.NodeReady, Status: corev1.ConditionFalse}: false, + // logger + logger := ctrl.Log.WithName("controllers").WithName("RollingUpgrade") + + // authenticator + auth := &RollingUpgradeAuthenticator{ + KubernetesClientSet: kubeClient, + AmazonClientSet: amazonClient, } - for condition, val := range tt { - node := corev1.Node{ - Status: corev1.NodeStatus{ - Conditions: []corev1.NodeCondition{ - condition, - }, - }, - } - g.Expect(isNodeReady(node)).To(gomega.Equal(val)) + // reconciler object + reconciler := &RollingUpgradeReconciler{ + Logger: logger, + Auth: auth, + EventWriter: kubeprovider.NewEventWriter(kubeClient, logger), + ScriptRunner: ScriptRunner{ + Logger: logger, + }, + DrainGroupMapper: &sync.Map{}, + DrainErrorMapper: &sync.Map{}, } -} + return reconciler -func TestIsNodePassesReadinessGates(t *testing.T) { - g := gomega.NewGomegaWithT(t) +} - type test struct { - gate []map[string]string - labels map[string]string - want 
bool - } - tests := []test{ - { - gate: []map[string]string{ - { - "healthy": "true", - }, - }, - labels: map[string]string{ - "healthy": "true", - }, - want: true, +func createRollingUpgradeContext(r *RollingUpgradeReconciler) *RollingUpgradeContext { + rollingUpgrade := createRollingUpgrade() + drainGroup, _ := r.DrainGroupMapper.LoadOrStore(rollingUpgrade.NamespacedName(), &sync.WaitGroup{}) + drainErrs, _ := r.DrainErrorMapper.LoadOrStore(rollingUpgrade.NamespacedName(), make(chan error)) + + return &RollingUpgradeContext{ + Logger: r.Logger, + Auth: r.Auth, + ScriptRunner: r.ScriptRunner, + Cloud: NewDiscoveredState(r.Auth, r.Logger), + DrainManager: &DrainManager{ + DrainErrors: drainErrs.(chan error), + DrainGroup: drainGroup.(*sync.WaitGroup), }, + RollingUpgrade: rollingUpgrade, + metricsMutex: &sync.Mutex{}, + } - { - gate: []map[string]string{}, - labels: map[string]string{ - "healthy": "true", - }, - want: true, - }, +} - { - gate: []map[string]string{ - {"healthy": "true"}, +func createRollingUpgrade() *v1alpha1.RollingUpgrade { + return &v1alpha1.RollingUpgrade{ + ObjectMeta: metav1.ObjectMeta{Name: "0", Namespace: "default"}, + Spec: v1alpha1.RollingUpgradeSpec{ + AsgName: "mock-asg-1", + PostDrainDelaySeconds: 30, + Strategy: v1alpha1.UpdateStrategy{ + Type: v1alpha1.RandomUpdateStrategy, }, - labels: map[string]string{ - "healthy": "false", - }, - want: false, }, + } +} - { - gate: []map[string]string{ - {"healthy": "true"}, +func createNodeList() *corev1.NodeList { + return &corev1.NodeList{ + Items: []corev1.Node{ + corev1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: "mock-node-1"}, + Spec: corev1.NodeSpec{ProviderID: "foo-bar/mock-instance-1"}, + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ + {Type: corev1.NodeReady, Status: corev1.ConditionTrue}, + }, + }, }, - labels: map[string]string{}, - want: false, - }, - - { - gate: []map[string]string{ - {"healthy": "true"}, - {"second-check": "true"}, + corev1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: "mock-node-2"}, + Spec: corev1.NodeSpec{ProviderID: "foo-bar/mock-instance-2"}, + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ + {Type: corev1.NodeReady, Status: corev1.ConditionTrue}, + }, + }, }, - labels: map[string]string{ - "healthy": "true", + corev1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: "mock-node-3"}, + Spec: corev1.NodeSpec{ProviderID: "foo-bar/mock-instance-3"}, + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ + {Type: corev1.NodeReady, Status: corev1.ConditionTrue}, + }, + }, }, - want: false, }, - { - gate: []map[string]string{ - {"healthy": "true"}, - {"second-check": "true"}, - }, - labels: map[string]string{ - "healthy": "true", - "second-check": "true", + } +} + +func createNodeSlice() []*corev1.Node { + return []*corev1.Node{ + createNode("mock-node-1"), + createNode("mock-node-2"), + createNode("mock-node-3"), + } +} + +func createNode(name string) *corev1.Node { + return &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "default"}, + Spec: corev1.NodeSpec{ProviderID: "foo-bar/mock-instance-1"}, + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ + {Type: corev1.NodeReady, Status: corev1.ConditionTrue}, }, - want: true, }, - { - gate: []map[string]string{ - {"healthy": "true"}, - {"second-check": "true"}, - }, - labels: map[string]string{ - "healthy": "true", - "second-check": "false", - }, - want: false, - }} - - for _, tt := range tests { - readinessGates := make([]upgrademgrv1alpha1.NodeReadinessGate, 
len(tt.gate)) - for i, g := range tt.gate { - readinessGates[i] = upgrademgrv1alpha1.NodeReadinessGate{ - MatchLabels: g, - } - } - node := corev1.Node{ - ObjectMeta: v1.ObjectMeta{ - Labels: tt.labels, - }, - } - g.Expect(isNodePassingReadinessGates(node, readinessGates)).To(gomega.Equal(tt.want)) } +} +// AWS +type MockAutoscalingGroup struct { + autoscalingiface.AutoScalingAPI + errorFlag bool + awsErr awserr.Error + errorInstanceId string + autoScalingGroups []*autoscaling.Group + Groups map[string]*autoscaling.Group + LaunchConfigurations map[string]*autoscaling.LaunchConfiguration } -func TestGetInServiceCount(t *testing.T) { - g := gomega.NewGomegaWithT(t) - tt := map[*autoscaling.Instance]int64{ - &autoscaling.Instance{LifecycleState: aws.String(autoscaling.LifecycleStateInService)}: 1, - &autoscaling.Instance{LifecycleState: aws.String(autoscaling.LifecycleStateDetached)}: 0, - &autoscaling.Instance{LifecycleState: aws.String(autoscaling.LifecycleStateDetaching)}: 0, - &autoscaling.Instance{LifecycleState: aws.String(autoscaling.LifecycleStateEnteringStandby)}: 0, - &autoscaling.Instance{LifecycleState: aws.String(autoscaling.LifecycleStatePending)}: 0, - &autoscaling.Instance{LifecycleState: aws.String(autoscaling.LifecycleStatePendingProceed)}: 0, - &autoscaling.Instance{LifecycleState: aws.String(autoscaling.LifecycleStatePendingWait)}: 0, - &autoscaling.Instance{LifecycleState: aws.String(autoscaling.LifecycleStateQuarantined)}: 0, - &autoscaling.Instance{LifecycleState: aws.String(autoscaling.LifecycleStateStandby)}: 0, - &autoscaling.Instance{LifecycleState: aws.String(autoscaling.LifecycleStateTerminated)}: 0, - &autoscaling.Instance{LifecycleState: aws.String(autoscaling.LifecycleStateTerminating)}: 0, - &autoscaling.Instance{LifecycleState: aws.String(autoscaling.LifecycleStateTerminatingProceed)}: 0, - &autoscaling.Instance{LifecycleState: aws.String(autoscaling.LifecycleStateTerminatingWait)}: 0, - } +type launchTemplateInfo struct { + name *string +} +type MockEC2 struct { + ec2iface.EC2API + LaunchTemplates map[string]*launchTemplateInfo +} - // test every condition - for instance, expectedCount := range tt { - instances := []*autoscaling.Instance{ - instance, - } - g.Expect(getInServiceCount(instances)).To(gomega.Equal(expectedCount)) - } +var _ ec2iface.EC2API = &MockEC2{} - // test all instances - instances := []*autoscaling.Instance{} - for instance := range tt { - instances = append(instances, instance) +func createASGInstance(instanceID string, launchConfigName string) *autoscaling.Instance { + return &autoscaling.Instance{ + InstanceId: &instanceID, + LaunchConfigurationName: &launchConfigName, + AvailabilityZone: aws.String("az-1"), + LifecycleState: aws.String("InService"), } - g.Expect(getInServiceCount(instances)).To(gomega.Equal(int64(1))) } -func TestGetInServiceIds(t *testing.T) { - g := gomega.NewGomegaWithT(t) - tt := map[*autoscaling.Instance][]string{ - &autoscaling.Instance{InstanceId: aws.String("i-1"), LifecycleState: aws.String(autoscaling.LifecycleStateInService)}: {"i-1"}, - &autoscaling.Instance{InstanceId: aws.String("i-2"), LifecycleState: aws.String(autoscaling.LifecycleStateDetached)}: {}, - &autoscaling.Instance{InstanceId: aws.String("i-3"), LifecycleState: aws.String(autoscaling.LifecycleStateDetaching)}: {}, - &autoscaling.Instance{InstanceId: aws.String("i-4"), LifecycleState: aws.String(autoscaling.LifecycleStateEnteringStandby)}: {}, - &autoscaling.Instance{InstanceId: aws.String("i-5"), LifecycleState: 
aws.String(autoscaling.LifecycleStatePending)}: {}, - &autoscaling.Instance{InstanceId: aws.String("i-6"), LifecycleState: aws.String(autoscaling.LifecycleStatePendingProceed)}: {}, - &autoscaling.Instance{InstanceId: aws.String("i-7"), LifecycleState: aws.String(autoscaling.LifecycleStatePendingWait)}: {}, - &autoscaling.Instance{InstanceId: aws.String("i-8"), LifecycleState: aws.String(autoscaling.LifecycleStateQuarantined)}: {}, - &autoscaling.Instance{InstanceId: aws.String("i-9"), LifecycleState: aws.String(autoscaling.LifecycleStateStandby)}: {}, - &autoscaling.Instance{InstanceId: aws.String("i-10"), LifecycleState: aws.String(autoscaling.LifecycleStateTerminated)}: {}, - &autoscaling.Instance{InstanceId: aws.String("i-11"), LifecycleState: aws.String(autoscaling.LifecycleStateTerminating)}: {}, - &autoscaling.Instance{InstanceId: aws.String("i-12"), LifecycleState: aws.String(autoscaling.LifecycleStateTerminatingProceed)}: {}, - &autoscaling.Instance{InstanceId: aws.String("i-13"), LifecycleState: aws.String(autoscaling.LifecycleStateTerminatingWait)}: {}, +func createASGInstanceWithLaunchTemplate(instanceID string, launchTemplateName string) *autoscaling.Instance { + return &autoscaling.Instance{ + InstanceId: &instanceID, + LaunchTemplate: &autoscaling.LaunchTemplateSpecification{ + LaunchTemplateName: &launchTemplateName, + }, + AvailabilityZone: aws.String("az-1"), + LifecycleState: aws.String("InService"), } +} - // test every condition - for instance, expectedList := range tt { - instances := []*autoscaling.Instance{ - instance, - } - g.Expect(getInServiceIds(instances)).To(gomega.Equal(expectedList)) +func createEc2Instances() []*ec2.Instance { + return []*ec2.Instance{ + &ec2.Instance{ + InstanceId: aws.String("mock-instance-1"), + }, + &ec2.Instance{ + InstanceId: aws.String("mock-instance-2"), + }, + &ec2.Instance{ + InstanceId: aws.String("mock-instance-3"), + }, } +} - // test all instances - instances := []*autoscaling.Instance{} - for instance := range tt { - instances = append(instances, instance) +func createASG(asgName string, launchConfigName string) *autoscaling.Group { + return &autoscaling.Group{ + AutoScalingGroupName: &asgName, + LaunchConfigurationName: &launchConfigName, + Instances: []*autoscaling.Instance{ + createASGInstance("mock-instance-1", launchConfigName), + createASGInstance("mock-instance-2", launchConfigName), + createASGInstance("mock-instance-3", launchConfigName), + }, + DesiredCapacity: func(x int) *int64 { i := int64(x); return &i }(3), } - g.Expect(getInServiceIds(instances)).To(gomega.Equal([]string{"i-1"})) } -func TestGetMaxUnavailableWithPercentageValue33(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - strategy := upgrademgrv1alpha1.UpdateStrategy{ - MaxUnavailable: intstr.Parse("67%"), +func createASGWithLaunchTemplate(asgName string, launchTemplate string) *autoscaling.Group { + return &autoscaling.Group{ + AutoScalingGroupName: &asgName, + LaunchTemplate: &autoscaling.LaunchTemplateSpecification{ + LaunchTemplateName: &asgName, + }, + Instances: []*autoscaling.Instance{ + createASGInstance("mock-instance-1", launchTemplate), + createASGInstance("mock-instance-2", launchTemplate), + createASGInstance("mock-instance-3", launchTemplate), + }, + DesiredCapacity: func(x int) *int64 { i := int64(x); return &i }(3), } - g.Expect(getMaxUnavailable(strategy, 3)).To(gomega.Equal(2)) } -func TestGetMaxUnavailableWithPercentageAndSingleInstance(t *testing.T) { - g := gomega.NewGomegaWithT(t) +func 
createASGWithMixedInstanceLaunchTemplate(asgName string, launchTemplate string) *autoscaling.Group { + return &autoscaling.Group{ + AutoScalingGroupName: &asgName, + MixedInstancesPolicy: &autoscaling.MixedInstancesPolicy{ + LaunchTemplate: &autoscaling.LaunchTemplate{ + LaunchTemplateSpecification: &autoscaling.LaunchTemplateSpecification{ + LaunchTemplateName: &asgName, + }, + }, + }, + Instances: []*autoscaling.Instance{ + createASGInstance("mock-instance-1", launchTemplate), + createASGInstance("mock-instance-2", launchTemplate), + createASGInstance("mock-instance-3", launchTemplate), + }, + DesiredCapacity: func(x int) *int64 { i := int64(x); return &i }(3), + } +} - totalNodes := 1 - strategy := upgrademgrv1alpha1.UpdateStrategy{ - MaxUnavailable: intstr.Parse("67%"), +func createDriftedASG(asgName string, launchConfigName string) *autoscaling.Group { + return &autoscaling.Group{ + AutoScalingGroupName: &asgName, + LaunchConfigurationName: &launchConfigName, + Instances: []*autoscaling.Instance{ + createASGInstance("mock-instance-1", "different-launch-config"), + createASGInstance("mock-instance-2", "different-launch-config"), + createASGInstance("mock-instance-3", "different-launch-config"), + }, + DesiredCapacity: func(x int) *int64 { i := int64(x); return &i }(3), } - g.Expect(getMaxUnavailable(strategy, totalNodes)).To(gomega.Equal(1)) } -func TestGetMaxUnavailableWithPercentageNonIntResult(t *testing.T) { - g := gomega.NewGomegaWithT(t) +func createASGs() []*autoscaling.Group { + return []*autoscaling.Group{ + createASG("mock-asg-1", "mock-launch-config-1"), + createDriftedASG("mock-asg-2", "mock-launch-config-2"), + createASG("mock-asg-3", "mock-launch-config-3"), + createASGWithLaunchTemplate("mock-asg-4", "mock-launch-template-4"), + createASGWithMixedInstanceLaunchTemplate("mock-asg-5", "mock-launch-template-5"), + } +} - strategy := upgrademgrv1alpha1.UpdateStrategy{ - MaxUnavailable: intstr.Parse("37%"), +func createASGClient() *MockAutoscalingGroup { + return &MockAutoscalingGroup{ + autoScalingGroups: createASGs(), } - g.Expect(getMaxUnavailable(strategy, 50)).To(gomega.Equal(18)) } -func TestGetMaxUnavailableWithIntValue(t *testing.T) { - g := gomega.NewGomegaWithT(t) +func createEc2Client() *MockEC2 { + return &MockEC2{} +} - strategy := upgrademgrv1alpha1.UpdateStrategy{ - MaxUnavailable: intstr.Parse("75"), +func createAmazonClient(t *testing.T) *awsprovider.AmazonClientSet { + return &awsprovider.AmazonClientSet{ + AsgClient: createASGClient(), + Ec2Client: createEc2Client(), } - g.Expect(getMaxUnavailable(strategy, 200)).To(gomega.Equal(75)) } -func TestGetNextAvailableInstance(t *testing.T) { - g := gomega.NewGomegaWithT(t) +/******************************* AWS MOCKS *******************************/ - mockAsgName := "some-asg" - mockInstanceName1 := "foo1" - mockInstanceName2 := "bar1" - az := "az-1" - instance1 := autoscaling.Instance{InstanceId: &mockInstanceName1, AvailabilityZone: &az} - instance2 := autoscaling.Instance{InstanceId: &mockInstanceName2, AvailabilityZone: &az} +func (mockAutoscalingGroup MockAutoscalingGroup) TerminateInstanceInAutoScalingGroup(input *autoscaling.TerminateInstanceInAutoScalingGroupInput) (*autoscaling.TerminateInstanceInAutoScalingGroupOutput, error) { + output := &autoscaling.TerminateInstanceInAutoScalingGroupOutput{} + if mockAutoscalingGroup.errorFlag { + if mockAutoscalingGroup.awsErr != nil { + if len(mockAutoscalingGroup.errorInstanceId) <= 0 || + mockAutoscalingGroup.errorInstanceId == *input.InstanceId { + return 
output, mockAutoscalingGroup.awsErr + } + } + } + asgChange := autoscaling.Activity{ActivityId: aws.String("xxx"), AutoScalingGroupName: aws.String("sss"), Cause: aws.String("xxx"), StartTime: aws.Time(time.Now()), StatusCode: aws.String("200"), StatusMessage: aws.String("success")} + output.Activity = &asgChange + return output, nil +} - instancesList := []*autoscaling.Instance{&instance1, &instance2} - rcRollingUpgrade := &RollingUpgradeReconciler{ClusterState: clusterState} - rcRollingUpgrade.ClusterState.initializeAsg(mockAsgName, instancesList) - available := getNextAvailableInstances(mockAsgName, 1, instancesList, rcRollingUpgrade.ClusterState) +// DescribeLaunchTemplatesPages mocks the describing the launch templates +func (m *MockEC2) DescribeLaunchTemplatesPages(request *ec2.DescribeLaunchTemplatesInput, callback func(*ec2.DescribeLaunchTemplatesOutput, bool) bool) error { + page, err := m.DescribeLaunchTemplates(request) + if err != nil { + return err + } - g.Expect(1).Should(gomega.Equal(len(available))) - g.Expect(rcRollingUpgrade.ClusterState.deleteAllInstancesInAsg(mockAsgName)).To(gomega.BeTrue()) + callback(page, false) + return nil } -func TestGetNextAvailableInstanceNoInstanceFound(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockAsgName := "some-asg" - mockInstanceName1 := "foo1" - mockInstanceName2 := "bar1" - az := "az-1" - instance1 := autoscaling.Instance{InstanceId: &mockInstanceName1, AvailabilityZone: &az} - instance2 := autoscaling.Instance{InstanceId: &mockInstanceName2, AvailabilityZone: &az} +// DescribeLaunchTemplates mocks the describing the launch templates +func (m *MockEC2) DescribeLaunchTemplates(request *ec2.DescribeLaunchTemplatesInput) (*ec2.DescribeLaunchTemplatesOutput, error) { - instancesList := []*autoscaling.Instance{&instance1, &instance2} - rcRollingUpgrade := &RollingUpgradeReconciler{ClusterState: clusterState} - rcRollingUpgrade.ClusterState.initializeAsg(mockAsgName, instancesList) - available := getNextAvailableInstances("asg2", 1, instancesList, rcRollingUpgrade.ClusterState) + o := &ec2.DescribeLaunchTemplatesOutput{} - g.Expect(0).Should(gomega.Equal(len(available))) - g.Expect(rcRollingUpgrade.ClusterState.deleteAllInstancesInAsg(mockAsgName)).To(gomega.BeTrue()) + if m.LaunchTemplates == nil { + return o, nil + } -} + for id, ltInfo := range m.LaunchTemplates { + launchTemplatetName := aws.StringValue(ltInfo.name) -func TestGetNextAvailableInstanceInAz(t *testing.T) { - g := gomega.NewGomegaWithT(t) + allFiltersMatch := true + for _, filter := range request.Filters { + filterName := aws.StringValue(filter.Name) + filterValue := aws.StringValue(filter.Values[0]) - mockAsgName := "some-asg" - mockInstanceName1 := "foo1" - mockInstanceName2 := "bar1" - az := "az-1" - az2 := "az-2" - instance1 := autoscaling.Instance{InstanceId: &mockInstanceName1, AvailabilityZone: &az} - instance2 := autoscaling.Instance{InstanceId: &mockInstanceName2, AvailabilityZone: &az2} + filterMatches := false + if filterName == "tag:Name" && filterValue == launchTemplatetName { + filterMatches = true + } + if strings.HasPrefix(filterName, "tag:kubernetes.io/cluster/") { + filterMatches = true + } - instancesList := []*autoscaling.Instance{&instance1, &instance2} - rcRollingUpgrade := &RollingUpgradeReconciler{ClusterState: clusterState} - rcRollingUpgrade.ClusterState.initializeAsg(mockAsgName, instancesList) + if !filterMatches { + allFiltersMatch = false + break + } + } - instances := getNextSetOfAvailableInstancesInAz(mockAsgName, az, 1, 
instancesList, rcRollingUpgrade.ClusterState) - g.Expect(1).Should(gomega.Equal(len(instances))) - g.Expect(mockInstanceName1).Should(gomega.Equal(*instances[0].InstanceId)) + if allFiltersMatch { + o.LaunchTemplates = append(o.LaunchTemplates, &ec2.LaunchTemplate{ + LaunchTemplateName: aws.String(launchTemplatetName), + LaunchTemplateId: aws.String(id), + }) + } + } - instances = getNextSetOfAvailableInstancesInAz(mockAsgName, az2, 1, instancesList, rcRollingUpgrade.ClusterState) - g.Expect(1).Should(gomega.Equal(len(instances))) - g.Expect(mockInstanceName2).Should(gomega.Equal(*instances[0].InstanceId)) + return o, nil +} - instances = getNextSetOfAvailableInstancesInAz(mockAsgName, "az3", 1, instancesList, rcRollingUpgrade.ClusterState) - g.Expect(0).Should(gomega.Equal(len(instances))) +func (m *MockAutoscalingGroup) DescribeAutoScalingGroupsPages(request *autoscaling.DescribeAutoScalingGroupsInput, callback func(*autoscaling.DescribeAutoScalingGroupsOutput, bool) bool) error { + // For the mock, we just send everything in one page + page, err := m.DescribeAutoScalingGroups(request) + if err != nil { + return err + } - g.Expect(rcRollingUpgrade.ClusterState.deleteAllInstancesInAsg(mockAsgName)).To(gomega.BeTrue()) + callback(page, false) + return nil } -func TestGetNextAvailableInstanceInAzGetMultipleInstances(t *testing.T) { - g := gomega.NewGomegaWithT(t) +func (m *MockAutoscalingGroup) DescribeAutoScalingGroups(input *autoscaling.DescribeAutoScalingGroupsInput) (*autoscaling.DescribeAutoScalingGroupsOutput, error) { + return &autoscaling.DescribeAutoScalingGroupsOutput{ + AutoScalingGroups: createASGs(), + }, nil +} - mockAsgName := "some-asg" - mockInstanceName1 := "foo1" - mockInstanceName2 := "bar1" - az := "az-1" - instance1 := autoscaling.Instance{InstanceId: &mockInstanceName1, AvailabilityZone: &az} - instance2 := autoscaling.Instance{InstanceId: &mockInstanceName2, AvailabilityZone: &az} +func (m *MockEC2) DescribeInstancesPages(request *ec2.DescribeInstancesInput, callback func(*ec2.DescribeInstancesOutput, bool) bool) error { + // For the mock, we just send everything in one page + page, err := m.DescribeInstances(request) + if err != nil { + return err + } - instancesList := []*autoscaling.Instance{&instance1, &instance2} - rcRollingUpgrade := &RollingUpgradeReconciler{ClusterState: clusterState} - rcRollingUpgrade.ClusterState.initializeAsg(mockAsgName, instancesList) + callback(page, false) - instances := getNextSetOfAvailableInstancesInAz(mockAsgName, az, 3, instancesList, rcRollingUpgrade.ClusterState) + return nil +} - // Even though the request is for 3 instances, only 2 should be returned as there are only 2 nodes in the ASG - g.Expect(2).Should(gomega.Equal(len(instances))) - instanceIds := []string{*instances[0].InstanceId, *instances[1].InstanceId} - g.Expect(instanceIds).Should(gomega.ConsistOf(mockInstanceName1, mockInstanceName2)) +func (m *MockEC2) DescribeInstances(*ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error) { + return &ec2.DescribeInstancesOutput{ + Reservations: []*ec2.Reservation{ + &ec2.Reservation{Instances: createEc2Instances()}, + }, + }, nil +} - g.Expect(rcRollingUpgrade.ClusterState.deleteAllInstancesInAsg(mockAsgName)).To(gomega.BeTrue()) +func (mockAutoscalingGroup MockAutoscalingGroup) EnterStandby(_ *autoscaling.EnterStandbyInput) (*autoscaling.EnterStandbyOutput, error) { + output := &autoscaling.EnterStandbyOutput{} + return output, nil } diff --git a/controllers/launch_definition.go b/controllers/launch_definition.go 
deleted file mode 100644 index 4fb902be..00000000 --- a/controllers/launch_definition.go +++ /dev/null @@ -1,27 +0,0 @@ -package controllers - -import ( - "github.com/aws/aws-sdk-go/service/autoscaling" -) - -// launchDefinition describes how instances are launched in ASG. -// Supports LaunchConfiguration and LaunchTemplate. -type launchDefinition struct { - // launchConfigurationName is name of LaunchConfiguration used by ASG. - // +optional - launchConfigurationName *string - // launchTemplate is Launch template definition used for ASG. - // +optional - launchTemplate *autoscaling.LaunchTemplateSpecification -} - -func NewLaunchDefinition(asg *autoscaling.Group) *launchDefinition { - template := asg.LaunchTemplate - if template == nil && asg.MixedInstancesPolicy != nil { - template = asg.MixedInstancesPolicy.LaunchTemplate.LaunchTemplateSpecification - } - return &launchDefinition{ - launchConfigurationName: asg.LaunchConfigurationName, - launchTemplate: template, - } -} diff --git a/controllers/metrics.go b/controllers/metrics.go new file mode 100644 index 00000000..337b91d2 --- /dev/null +++ b/controllers/metrics.go @@ -0,0 +1,128 @@ +package controllers + +import ( + "time" + + "github.com/keikoproj/upgrade-manager/api/v1alpha1" + "github.com/keikoproj/upgrade-manager/controllers/common" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Update metrics status UpdateMetricsStatus +func (s *RollingUpgradeContext) UpdateMetricsStatus(batchNodes map[string]*v1alpha1.NodeInProcessing, nodeSteps map[string][]v1alpha1.NodeStepDuration) { + s.UpdateLastBatchNodes(batchNodes) + s.UpdateStatistics(nodeSteps) +} + +// Update last batch nodes +func (s *RollingUpgradeContext) UpdateLastBatchNodes(batchNodes map[string]*v1alpha1.NodeInProcessing) { + s.RollingUpgrade.Status.NodeInProcessing = batchNodes + keys := make([]string, 0, len(batchNodes)) + for k := range batchNodes { + keys = append(keys, k) + } + s.RollingUpgrade.Status.LastBatchNodes = keys +} + +// Update Node Statistics +func (s *RollingUpgradeContext) UpdateStatistics(nodeSteps map[string][]v1alpha1.NodeStepDuration) { + for _, v := range nodeSteps { + for _, step := range v { + s.AddNodeStepDuration(step) + } + } +} + +// Add one step duration +func (s *RollingUpgradeContext) AddNodeStepDuration(nsd v1alpha1.NodeStepDuration) { + // if step exists, add count and sum, otherwise append + for _, s := range s.RollingUpgrade.Status.Statistics { + if s.StepName == nsd.StepName { + s.DurationSum = metav1.Duration{ + Duration: s.DurationSum.Duration + nsd.Duration.Duration, + } + s.DurationCount += 1 + return + } + } + s.RollingUpgrade.Status.Statistics = append(s.RollingUpgrade.Status.Statistics, &v1alpha1.RollingUpgradeStatistics{ + StepName: nsd.StepName, + DurationSum: metav1.Duration{ + Duration: nsd.Duration.Duration, + }, + DurationCount: 1, + }) +} + +// Add one step duration +func (s *RollingUpgradeContext) ToStepDuration(groupName, nodeName string, stepName v1alpha1.RollingUpgradeStep, duration time.Duration) v1alpha1.NodeStepDuration { + //Add to system level statistics + common.AddStepDuration(groupName, string(stepName), duration) + return v1alpha1.NodeStepDuration{ + GroupName: groupName, + NodeName: nodeName, + StepName: stepName, + Duration: metav1.Duration{ + Duration: duration, + }, + } +} + +func (s *RollingUpgradeContext) DoNodeStep(InProcessingNodes map[string]*v1alpha1.NodeInProcessing, + nodeSteps map[string][]v1alpha1.NodeStepDuration, groupName, nodeName string, stepName v1alpha1.RollingUpgradeStep, endTime 
metav1.Time) {
+
+	var inProcessingNode *v1alpha1.NodeInProcessing
+	if n, ok := InProcessingNodes[nodeName]; !ok {
+		inProcessingNode = &v1alpha1.NodeInProcessing{
+			NodeName:         nodeName,
+			StepName:         stepName,
+			UpgradeStartTime: metav1.Now(),
+			StepStartTime:    metav1.Now(),
+		}
+		InProcessingNodes[nodeName] = inProcessingNode
+	} else {
+		inProcessingNode = n
+	}
+
+	inProcessingNode.StepEndTime = endTime
+	var duration = inProcessingNode.StepEndTime.Sub(inProcessingNode.StepStartTime.Time)
+	if stepName == v1alpha1.NodeRotationCompleted {
+		// Add the overall duration and remove the node from the in-processing map
+		var total = inProcessingNode.StepEndTime.Sub(inProcessingNode.UpgradeStartTime.Time)
+		duration1 := s.ToStepDuration(groupName, nodeName, inProcessingNode.StepName, duration)
+		duration2 := s.ToStepDuration(groupName, nodeName, v1alpha1.NodeRotationTotal, total)
+		s.addNodeStepDuration(nodeSteps, nodeName, duration1)
+		s.addNodeStepDuration(nodeSteps, nodeName, duration2)
+		s.metricsMutex.Lock()
+		delete(InProcessingNodes, nodeName)
+		s.metricsMutex.Unlock()
+	} else if inProcessingNode.StepName != stepName { // the node has moved on to a new step
+		var oldOrder = v1alpha1.NodeRotationStepOrders[inProcessingNode.StepName]
+		var newOrder = v1alpha1.NodeRotationStepOrders[stepName]
+		if newOrder > oldOrder { // make sure the steps are running in order
+			stepDuration := s.ToStepDuration(groupName, nodeName, inProcessingNode.StepName, duration)
+			inProcessingNode.StepStartTime = endTime
+			inProcessingNode.StepName = stepName
+			s.addNodeStepDuration(nodeSteps, nodeName, stepDuration)
+		}
+	}
+}
+
+// NodeStep records that a node has moved on to the given step
+func (s *RollingUpgradeContext) NodeStep(InProcessingNodes map[string]*v1alpha1.NodeInProcessing,
+	nodeSteps map[string][]v1alpha1.NodeStepDuration, groupName, nodeName string, stepName v1alpha1.RollingUpgradeStep) {
+	s.DoNodeStep(InProcessingNodes, nodeSteps, groupName, nodeName, stepName, metav1.Now())
+}
+
+func (s *RollingUpgradeContext) addNodeStepDuration(steps map[string][]v1alpha1.NodeStepDuration, nodeName string, nsd v1alpha1.NodeStepDuration) {
+	s.metricsMutex.Lock()
+	if stepDuration, ok := steps[nodeName]; !ok {
+		steps[nodeName] = []v1alpha1.NodeStepDuration{
+			nsd,
+		}
+	} else {
+		stepDuration = append(stepDuration, nsd)
+		steps[nodeName] = stepDuration
+	}
+	s.metricsMutex.Unlock()
+}
diff --git a/controllers/metrics_test.go b/controllers/metrics_test.go
new file mode 100644
index 00000000..a55d7025
--- /dev/null
+++ b/controllers/metrics_test.go
@@ -0,0 +1,53 @@
+package controllers
+
+import (
+	"testing"
+
+	"github.com/keikoproj/upgrade-manager/api/v1alpha1"
+	"github.com/onsi/gomega"
+)
+
+// TestNodeTurnsOntoStep verifies node step transitions and the recorded step durations
+func TestNodeTurnsOntoStep(t *testing.T) {
+	g := gomega.NewGomegaWithT(t)
+
+	reconciler := createRollingUpgradeReconciler(t)
+	r := createRollingUpgradeContext(reconciler)
+
+	// A map to retain the steps for multiple nodes
+	nodeSteps := make(map[string][]v1alpha1.NodeStepDuration)
+	inProcessingNodes := make(map[string]*v1alpha1.NodeInProcessing)
+
+	r.NodeStep(inProcessingNodes, nodeSteps, "test-asg", "node-1", v1alpha1.NodeRotationKickoff)
+
+	g.Expect(inProcessingNodes).NotTo(gomega.BeNil())
+	g.Expect(nodeSteps["node-1"]).To(gomega.BeNil())
+
+	r.NodeStep(inProcessingNodes, nodeSteps, "test-asg", "node-1", v1alpha1.NodeRotationDesiredNodeReady)
+
+	g.Expect(len(nodeSteps["node-1"])).To(gomega.Equal(1))
+	g.Expect(nodeSteps["node-1"][0].StepName).To(gomega.Equal(v1alpha1.NodeRotationKickoff))
+
+	// Retry desired_node_ready
+	r.NodeStep(inProcessingNodes, nodeSteps, "test-asg",
"node-1", v1alpha1.NodeRotationDesiredNodeReady) + g.Expect(len(nodeSteps["node-1"])).To(gomega.Equal(1)) + g.Expect(nodeSteps["node-1"][0].StepName).To(gomega.Equal(v1alpha1.NodeRotationKickoff)) + + //Retry desired_node_ready again + r.NodeStep(inProcessingNodes, nodeSteps, "test-asg", "node-1", v1alpha1.NodeRotationDesiredNodeReady) + g.Expect(len(nodeSteps["node-1"])).To(gomega.Equal(1)) + g.Expect(nodeSteps["node-1"][0].StepName).To(gomega.Equal(v1alpha1.NodeRotationKickoff)) + + //Completed + r.NodeStep(inProcessingNodes, nodeSteps, "test-asg", "node-1", v1alpha1.NodeRotationCompleted) + g.Expect(len(nodeSteps["node-1"])).To(gomega.Equal(3)) + g.Expect(nodeSteps["node-1"][1].StepName).To(gomega.Equal(v1alpha1.NodeRotationDesiredNodeReady)) + g.Expect(nodeSteps["node-1"][2].StepName).To(gomega.Equal(v1alpha1.NodeRotationTotal)) + + //Second node + r.NodeStep(inProcessingNodes, nodeSteps, "test-asg", "node-2", v1alpha1.NodeRotationKickoff) + g.Expect(len(nodeSteps["node-1"])).To(gomega.Equal(3)) + + r.NodeStep(inProcessingNodes, nodeSteps, "test-asg", "node-2", v1alpha1.NodeRotationDesiredNodeReady) + g.Expect(len(nodeSteps["node-1"])).To(gomega.Equal(3)) +} diff --git a/controllers/node_selector.go b/controllers/node_selector.go deleted file mode 100644 index 8d2cd241..00000000 --- a/controllers/node_selector.go +++ /dev/null @@ -1,19 +0,0 @@ -package controllers - -import ( - "github.com/aws/aws-sdk-go/service/autoscaling" - upgrademgrv1alpha1 "github.com/keikoproj/upgrade-manager/api/v1alpha1" -) - -type NodeSelector interface { - SelectNodesForRestack(state ClusterState) []*autoscaling.Instance -} - -func getNodeSelector(asg *autoscaling.Group, ruObj *upgrademgrv1alpha1.RollingUpgrade) NodeSelector { - switch ruObj.Spec.Strategy.Type { - case upgrademgrv1alpha1.UniformAcrossAzUpdateStrategy: - return NewUniformAcrossAzNodeSelector(asg, ruObj) - default: - return NewRandomNodeSelector(asg, ruObj) - } -} diff --git a/controllers/node_selector_test.go b/controllers/node_selector_test.go deleted file mode 100644 index 4eeae7a1..00000000 --- a/controllers/node_selector_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package controllers - -import ( - "github.com/aws/aws-sdk-go/service/autoscaling" - upgrademgrv1alpha1 "github.com/keikoproj/upgrade-manager/api/v1alpha1" - "github.com/onsi/gomega" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "testing" -) - -func TestGetRandomNodeSelector(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - someAsg := "some-asg" - mockID := "some-id" - someLaunchConfig := "some-launch-config" - diffLaunchConfig := "different-launch-config" - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: &mockID, LaunchConfigurationName: &diffLaunchConfig, AvailabilityZone: &az} - mockAsg := autoscaling.Group{AutoScalingGroupName: &someAsg, - LaunchConfigurationName: &someLaunchConfig, - Instances: []*autoscaling.Instance{&mockInstance}} - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{AsgName: someAsg}, - } - - nodeSelector := getNodeSelector(&mockAsg, ruObj) - - g.Expect(nodeSelector).Should(gomega.BeAssignableToTypeOf(&RandomNodeSelector{})) -} - -func TestGetUniformAcrossAzNodeSelector(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - someAsg := "some-asg" - mockID := "some-id" - someLaunchConfig := "some-launch-config" - diffLaunchConfig := "different-launch-config" - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: &mockID, 
LaunchConfigurationName: &diffLaunchConfig, AvailabilityZone: &az} - mockAsg := autoscaling.Group{AutoScalingGroupName: &someAsg, - LaunchConfigurationName: &someLaunchConfig, - Instances: []*autoscaling.Instance{&mockInstance}} - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: someAsg, - Strategy: upgrademgrv1alpha1.UpdateStrategy{ - Type: upgrademgrv1alpha1.UniformAcrossAzUpdateStrategy, - }, - }, - } - - nodeSelector := getNodeSelector(&mockAsg, ruObj) - - g.Expect(nodeSelector).Should(gomega.BeAssignableToTypeOf(&UniformAcrossAzNodeSelector{})) -} - -func TestGetNodeSelectorWithInvalidStrategy(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - someAsg := "some-asg" - mockID := "some-id" - someLaunchConfig := "some-launch-config" - diffLaunchConfig := "different-launch-config" - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: &mockID, LaunchConfigurationName: &diffLaunchConfig, AvailabilityZone: &az} - mockAsg := autoscaling.Group{AutoScalingGroupName: &someAsg, - LaunchConfigurationName: &someLaunchConfig, - Instances: []*autoscaling.Instance{&mockInstance}} - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: someAsg, - Strategy: upgrademgrv1alpha1.UpdateStrategy{ - Type: "invalid", - }, - }, - } - - nodeSelector := getNodeSelector(&mockAsg, ruObj) - - g.Expect(nodeSelector).Should(gomega.BeAssignableToTypeOf(&RandomNodeSelector{})) -} diff --git a/controllers/providers/aws/autoscaling.go b/controllers/providers/aws/autoscaling.go new file mode 100644 index 00000000..e461260a --- /dev/null +++ b/controllers/providers/aws/autoscaling.go @@ -0,0 +1,72 @@ +/* +Copyright 2021 Intuit Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package aws + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/autoscaling" +) + +var ( + TerminatingInstanceStates = []string{ + autoscaling.LifecycleStateTerminating, + autoscaling.LifecycleStateTerminatingWait, + autoscaling.LifecycleStateTerminatingProceed, + autoscaling.LifecycleStateTerminated, + autoscaling.LifecycleStateWarmedTerminating, + autoscaling.LifecycleStateWarmedTerminatingWait, + autoscaling.LifecycleStateWarmedTerminatingProceed, + autoscaling.LifecycleStateWarmedTerminated, + } + // Instance standBy limit is enforced by AWS EnterStandBy API + InstanceStandByLimit = 19 +) + +func (a *AmazonClientSet) DescribeScalingGroups() ([]*autoscaling.Group, error) { + scalingGroups := []*autoscaling.Group{} + err := a.AsgClient.DescribeAutoScalingGroupsPages(&autoscaling.DescribeAutoScalingGroupsInput{}, func(page *autoscaling.DescribeAutoScalingGroupsOutput, lastPage bool) bool { + scalingGroups = append(scalingGroups, page.AutoScalingGroups...) 
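+		// The pager invokes this callback once per page of results; returning
+		// true asks the SDK to fetch the next page, so iteration stops once NextToken is nil.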
+ return page.NextToken != nil + }) + if err != nil { + return scalingGroups, err + } + return scalingGroups, nil +} + +func (a *AmazonClientSet) TerminateInstance(instance *autoscaling.Instance) error { + instanceID := aws.StringValue(instance.InstanceId) + input := &autoscaling.TerminateInstanceInAutoScalingGroupInput{ + InstanceId: aws.String(instanceID), + ShouldDecrementDesiredCapacity: aws.Bool(false), + } + + if _, err := a.AsgClient.TerminateInstanceInAutoScalingGroup(input); err != nil { + return err + } + return nil +} + +func (a *AmazonClientSet) SetInstancesStandBy(instanceIDs []string, scalingGroupName string) error { + input := &autoscaling.EnterStandbyInput{ + AutoScalingGroupName: aws.String(scalingGroupName), + InstanceIds: aws.StringSlice(instanceIDs), + ShouldDecrementDesiredCapacity: aws.Bool(false), + } + _, err := a.AsgClient.EnterStandby(input) + return err +} diff --git a/controllers/providers/aws/ec2.go b/controllers/providers/aws/ec2.go new file mode 100644 index 00000000..51cc0dd1 --- /dev/null +++ b/controllers/providers/aws/ec2.go @@ -0,0 +1,77 @@ +/* +Copyright 2021 Intuit Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package aws + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" +) + +func (a *AmazonClientSet) DescribeLaunchTemplates() ([]*ec2.LaunchTemplate, error) { + launchTemplates := []*ec2.LaunchTemplate{} + err := a.Ec2Client.DescribeLaunchTemplatesPages(&ec2.DescribeLaunchTemplatesInput{}, func(page *ec2.DescribeLaunchTemplatesOutput, lastPage bool) bool { + launchTemplates = append(launchTemplates, page.LaunchTemplates...) 
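+		// Same paging pattern as DescribeScalingGroups above: accumulate each
+		// page and keep paginating while the callback returns true.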
+ return page.NextToken != nil + }) + if err != nil { + return launchTemplates, err + } + return launchTemplates, nil +} + +func (a *AmazonClientSet) DescribeTaggedInstanceIDs(tagKey, tagValue string) ([]string, error) { + instances := []string{} + key := fmt.Sprintf("tag:%v", tagKey) + input := &ec2.DescribeInstancesInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String(key), + Values: aws.StringSlice([]string{tagValue}), + }, + { + Name: aws.String("instance-state-name"), + Values: aws.StringSlice([]string{"pending", "running"}), + }, + }, + } + + err := a.Ec2Client.DescribeInstancesPages(input, func(page *ec2.DescribeInstancesOutput, lastPage bool) bool { + for _, res := range page.Reservations { + for _, instance := range res.Instances { + instances = append(instances, aws.StringValue(instance.InstanceId)) + } + } + return page.NextToken != nil + }) + return instances, err +} + +func (a *AmazonClientSet) TagEC2instances(instanceIDs []string, tagKey, tagValue string) error { + input := &ec2.CreateTagsInput{ + Resources: aws.StringSlice(instanceIDs), + Tags: []*ec2.Tag{ + { + Key: aws.String(tagKey), + Value: aws.String(tagValue), + }, + }, + } + _, err := a.Ec2Client.CreateTags(input) + return err +} diff --git a/controllers/providers/aws/utils.go b/controllers/providers/aws/utils.go new file mode 100644 index 00000000..b5b46874 --- /dev/null +++ b/controllers/providers/aws/utils.go @@ -0,0 +1,119 @@ +/* +Copyright 2021 Intuit Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package aws + +import ( + "fmt" + "os" + "sort" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go/service/ec2/ec2iface" +) + +type AmazonClientSet struct { + AsgClient autoscalingiface.AutoScalingAPI + Ec2Client ec2iface.EC2API +} + +func DeriveRegion() (string, error) { + + if region := os.Getenv("AWS_REGION"); region != "" { + return region, nil + } + + var config aws.Config + sess := session.Must(session.NewSessionWithOptions(session.Options{ + SharedConfigState: session.SharedConfigEnable, + Config: config, + })) + c := ec2metadata.New(sess) + region, err := c.Region() + if err != nil { + return "", fmt.Errorf("cannot reach ec2metadata, if running locally export AWS_REGION: %w", err) + } + return region, nil +} + +func SelectScalingGroup(name string, groups []*autoscaling.Group) *autoscaling.Group { + for _, group := range groups { + groupName := aws.StringValue(group.AutoScalingGroupName) + if strings.EqualFold(groupName, name) { + return group + } + } + return &autoscaling.Group{} +} + +func SelectScalingGroupInstance(instanceID string, group *autoscaling.Group) *autoscaling.Instance { + for _, instance := range group.Instances { + selectedID := aws.StringValue(instance.InstanceId) + if strings.EqualFold(instanceID, selectedID) { + return instance + } + } + return &autoscaling.Instance{} +} + +func GetScalingAZs(instances []*autoscaling.Instance) []string { + AZs := make([]string, 0) + for _, instance := range instances { + AZ := aws.StringValue(instance.AvailabilityZone) + AZs = append(AZs, AZ) + } + sort.Strings(AZs) + return AZs +} + +func GetInstanceIDs(instances []*autoscaling.Instance) []string { + IDs := make([]string, 0) + for _, instance := range instances { + ID := aws.StringValue(instance.InstanceId) + IDs = append(IDs, ID) + } + sort.Strings(IDs) + return IDs +} + +func GetTemplateLatestVersion(templates []*ec2.LaunchTemplate, templateName string) string { + for _, template := range templates { + name := aws.StringValue(template.LaunchTemplateName) + if strings.EqualFold(name, templateName) { + versionInt := aws.Int64Value(template.LatestVersionNumber) + return strconv.FormatInt(versionInt, 10) + } + } + return "0" +} + +func GetInServiceInstanceIDs(instances []*autoscaling.Instance) []string { + var inServiceInstanceIDs []string + for _, instance := range instances { + if aws.StringValue(instance.LifecycleState) == autoscaling.LifecycleStateInService { + inServiceInstanceIDs = append(inServiceInstanceIDs, aws.StringValue(instance.InstanceId)) + } + } + return inServiceInstanceIDs + +} diff --git a/controllers/providers/kubernetes/events.go b/controllers/providers/kubernetes/events.go new file mode 100644 index 00000000..6c9ac2bd --- /dev/null +++ b/controllers/providers/kubernetes/events.go @@ -0,0 +1,97 @@ +/* +Copyright 2021 Intuit Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubernetes + +import ( + "context" + "encoding/json" + "fmt" + "math/rand" + "time" + + "github.com/go-logr/logr" + "github.com/keikoproj/upgrade-manager/api/v1alpha1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type EventWriter struct { + *KubernetesClientSet + logr.Logger +} + +func NewEventWriter(k *KubernetesClientSet, logger logr.Logger) *EventWriter { + return &EventWriter{ + KubernetesClientSet: k, + Logger: logger, + } +} + +// EventReason defines the reason of an event +type EventReason string + +// EventLevel defines the level of an event +type EventLevel string + +const ( + // EventLevelNormal is the level of a normal event + EventLevelNormal = "Normal" + // EventLevelWarning is the level of a warning event + EventLevelWarning = "Warning" + // EventReasonRUStarted Rolling Upgrade Started + EventReasonRUStarted EventReason = "RollingUpgradeStarted" + // EventReasonRUInstanceStarted Rolling Upgrade for Instance has started + EventReasonRUInstanceStarted EventReason = "RollingUpgradeInstanceStarted" + // EventReasonRUInstanceFinished Rolling Upgrade for Instance has finished + EventReasonRUInstanceFinished EventReason = "RollingUpgradeInstanceFinished" + // EventReasonRUFinished Rolling Upgrade Finished + EventReasonRUFinished EventReason = "RollingUpgradeFinished" +) + +func (w *EventWriter) CreateEvent(rollingUpgrade *v1alpha1.RollingUpgrade, reason EventReason, level string, msgFields map[string]string) { + b, _ := json.Marshal(msgFields) + msgPayload := string(b) + t := metav1.Time{Time: time.Now()} + event := &corev1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v-%v.%v", rollingUpgrade.GetName(), time.Now().Unix(), rand.Int()), + }, + Source: corev1.EventSource{ + Component: "upgrade-manager", + }, + InvolvedObject: corev1.ObjectReference{ + Kind: "RollingUpgrade", + Name: rollingUpgrade.GetName(), + Namespace: rollingUpgrade.GetNamespace(), + ResourceVersion: rollingUpgrade.GetResourceVersion(), + APIVersion: v1alpha1.GroupVersion.Version, + UID: rollingUpgrade.GetUID(), + }, + Reason: string(reason), + Message: msgPayload, + Type: level, + Count: 1, + FirstTimestamp: t, + LastTimestamp: t, + } + + w.V(1).Info("publishing event", "event", event) + _, err := w.Kubernetes.CoreV1().Events(rollingUpgrade.GetNamespace()).Create(context.Background(), event, metav1.CreateOptions{}) + if err != nil { + w.Error(err, "failed to publish event") + } +} diff --git a/controllers/providers/kubernetes/nodes.go b/controllers/providers/kubernetes/nodes.go new file mode 100644 index 00000000..7a24ee5b --- /dev/null +++ b/controllers/providers/kubernetes/nodes.go @@ -0,0 +1,77 @@ +/* +Copyright 2021 Intuit Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubernetes + +import ( + "context" + "fmt" + "os" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + drain "k8s.io/kubectl/pkg/drain" +) + +// ListClusterNodes gets a list of all nodes in the cluster +func (k *KubernetesClientSet) ListClusterNodes() (*corev1.NodeList, error) { + var nodes *corev1.NodeList + nodes, err := k.Kubernetes.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) + if err != nil || nodes == nil { + return &corev1.NodeList{}, err + } + return nodes, nil +} + +// DrainNode cordons and drains a node. +func (k *KubernetesClientSet) DrainNode(node *corev1.Node, PostDrainDelaySeconds time.Duration, DrainTimeout int, client kubernetes.Interface) error { + if client == nil { + return fmt.Errorf("K8sClient not set") + } + + if node == nil { + return fmt.Errorf("node not set") + } + + helper := &drain.Helper{ + Client: client, + Force: true, + GracePeriodSeconds: -1, + IgnoreAllDaemonSets: true, + Out: os.Stdout, + ErrOut: os.Stdout, + DeleteEmptyDirData: true, + Timeout: time.Duration(DrainTimeout) * time.Second, + } + + if err := drain.RunCordonOrUncordon(helper, node, true); err != nil { + if apierrors.IsNotFound(err) { + return err + } + return fmt.Errorf("error cordoning node: %v", err) + } + + if err := drain.RunNodeDrain(helper, node.Name); err != nil { + if apierrors.IsNotFound(err) { + return err + } + return fmt.Errorf("error draining node: %v", err) + } + return nil +} diff --git a/controllers/providers/kubernetes/utils.go b/controllers/providers/kubernetes/utils.go new file mode 100644 index 00000000..abeddd92 --- /dev/null +++ b/controllers/providers/kubernetes/utils.go @@ -0,0 +1,129 @@ +/* +Copyright 2021 Intuit Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubernetes + +import ( + "fmt" + "os" + "os/user" + "strings" + + corev1 "k8s.io/api/core/v1" + + "github.com/keikoproj/upgrade-manager/api/v1alpha1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +// Placeholder Kubernetes helper functions + +type KubernetesClientSet struct { + Kubernetes kubernetes.Interface +} + +func GetKubernetesClient() (kubernetes.Interface, error) { + var config *rest.Config + config, err := GetKubernetesConfig() + if err != nil { + return nil, err + } + client, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, err + } + return client, nil +} + +func GetKubernetesConfig() (*rest.Config, error) { + var config *rest.Config + config, err := rest.InClusterConfig() + if err != nil { + config, err = GetKubernetesLocalConfig() + if err != nil { + return nil, err + } + return config, nil + } + return config, nil +} + +func GetKubernetesLocalConfig() (*rest.Config, error) { + var kubePath string + if os.Getenv("KUBECONFIG") != "" { + kubePath = os.Getenv("KUBECONFIG") + } else { + usr, err := user.Current() + if err != nil { + return nil, err + } + kubePath = usr.HomeDir + "/.kube/config" + } + + if kubePath == "" { + err := fmt.Errorf("failed to get kubeconfig path") + return nil, err + } + + config, err := clientcmd.BuildConfigFromFlags("", kubePath) + if err != nil { + return nil, err + } + return config, nil +} + +func SelectNodeByInstanceID(instanceID string, nodes []*corev1.Node) *corev1.Node { + for _, node := range nodes { + nodeID := GetNodeInstanceID(node) + if strings.EqualFold(instanceID, nodeID) { + return node + } + } + return nil +} + +func GetNodeInstanceID(node *corev1.Node) string { + if node != nil { + tokens := strings.Split(node.Spec.ProviderID, "/") + nodeInstanceID := tokens[len(tokens)-1] + return nodeInstanceID + } + return "" +} + +func IsNodeReady(node *corev1.Node) bool { + for _, condition := range node.Status.Conditions { + if condition.Type == corev1.NodeReady && condition.Status == corev1.ConditionTrue { + return true + } + } + return false +} + +func IsNodePassesReadinessGates(node *corev1.Node, requiredReadinessGates []v1alpha1.NodeReadinessGate) bool { + if len(requiredReadinessGates) == 0 { + return true + } + for _, gate := range requiredReadinessGates { + for key, value := range gate.MatchLabels { + if node.Labels[key] != value { + return false + } + } + } + return true +} diff --git a/controllers/random_node_selector.go b/controllers/random_node_selector.go deleted file mode 100644 index 0916de45..00000000 --- a/controllers/random_node_selector.go +++ /dev/null @@ -1,27 +0,0 @@ -package controllers - -import ( - "github.com/aws/aws-sdk-go/service/autoscaling" - upgrademgrv1alpha1 "github.com/keikoproj/upgrade-manager/api/v1alpha1" - "log" -) - -type RandomNodeSelector struct { - maxUnavailable int - ruObj *upgrademgrv1alpha1.RollingUpgrade - asg *autoscaling.Group -} - -func NewRandomNodeSelector(asg *autoscaling.Group, ruObj *upgrademgrv1alpha1.RollingUpgrade) *RandomNodeSelector { - maxUnavailable := getMaxUnavailable(ruObj.Spec.Strategy, len(asg.Instances)) - log.Printf("Max unavailable calculated for %s is %d", ruObj.Name, maxUnavailable) - return &RandomNodeSelector{ - maxUnavailable: maxUnavailable, - ruObj: ruObj, - asg: asg, - } -} - -func (selector *RandomNodeSelector) SelectNodesForRestack(state ClusterState) []*autoscaling.Instance { - return getNextAvailableInstances(selector.ruObj.Spec.AsgName, selector.maxUnavailable, 
selector.asg.Instances, state) -} diff --git a/controllers/rollingupgrade_controller.go b/controllers/rollingupgrade_controller.go index 3c38cf76..e8372010 100644 --- a/controllers/rollingupgrade_controller.go +++ b/controllers/rollingupgrade_controller.go @@ -1,11 +1,9 @@ /* - +Copyright 2021 Intuit Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -18,1250 +16,246 @@ package controllers import ( "context" "fmt" - "strconv" "strings" "sync" - "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/go-logr/logr" "github.com/keikoproj/aws-sdk-go-cache/cache" - iebackoff "github.com/keikoproj/inverse-exp-backoff" + "github.com/keikoproj/upgrade-manager/api/v1alpha1" + "github.com/keikoproj/upgrade-manager/controllers/common" + "github.com/keikoproj/upgrade-manager/controllers/common/log" + awsprovider "github.com/keikoproj/upgrade-manager/controllers/providers/aws" + kubeprovider "github.com/keikoproj/upgrade-manager/controllers/providers/kubernetes" corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/kubernetes" - v1 "k8s.io/client-go/kubernetes/typed/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/keikoproj/upgrade-manager/api/v1alpha1" - upgrademgrv1alpha1 "github.com/keikoproj/upgrade-manager/api/v1alpha1" - "github.com/keikoproj/upgrade-manager/controllers/common" -) - -const ( - // JanitorAnnotation is for completed objects. - JanitorAnnotation = "janitor/ttl" - // ClearCompletedFrequency is the time after which a completed rollingUpgrade object is deleted. - ClearCompletedFrequency = "1d" - // ClearErrorFrequency is the time after which an errored rollingUpgrade object is deleted. 
- ClearErrorFrequency = "7d" - // EC2StateTagKey is the EC2 tag key for indicating the state - EC2StateTagKey = "upgrademgr.keikoproj.io/state" - - // Environment variable keys - asgNameKey = "ASG_NAME" - instanceIDKey = "INSTANCE_ID" - instanceNameKey = "INSTANCE_NAME" - - // InService is a state of an instance - InService = "InService" -) - -var ( - // TerminationTimeoutSeconds is the timeout threshold for waiting for a node object unjoin - TerminationTimeoutSeconds = 3600 - // TerminationSleepIntervalSeconds is the polling interval for checking if a node object is unjoined - TerminationSleepIntervalSeconds = 30 - // WaiterMaxDelay is the maximum delay for waiters inverse exponential backoff - WaiterMaxDelay = time.Second * 90 - // WaiterMinDelay is the minimum delay for waiters inverse exponential backoff - WaiterMinDelay = time.Second * 15 - // WaiterFactor is the delay reduction factor per retry - WaiterFactor = 0.5 - // WaiterMaxAttempts is the maximum number of retries for waiters - WaiterMaxAttempts = uint32(32) - // CacheTTL is ttl for ASG cache. - CacheTTL = 30 * time.Second + "sigs.k8s.io/controller-runtime/pkg/source" ) // RollingUpgradeReconciler reconciles a RollingUpgrade object type RollingUpgradeReconciler struct { client.Client - Log logr.Logger - EC2Client ec2iface.EC2API - ASGClient autoscalingiface.AutoScalingAPI - generatedClient *kubernetes.Clientset - NodeList *corev1.NodeList - LaunchTemplates []*ec2.LaunchTemplate - inProcessASGs sync.Map - admissionMap sync.Map - ruObjNameToASG AsgCache - ClusterState ClusterState - maxParallel int - CacheConfig *cache.Config - ScriptRunner ScriptRunner -} - -type AsgCache struct { - cache sync.Map -} - -func (c *AsgCache) IsExpired(name string) bool { - if val, ok := c.cache.Load(name); !ok { - return true - } else { - cached := val.(CachedValue) - return time.Now().After(cached.expiration) - } -} - -func (c *AsgCache) Load(name string) (*autoscaling.Group, bool) { - if val, ok := c.cache.Load(name); !ok { - return nil, false - } else { - cached := val.(CachedValue) - return cached.val.(*autoscaling.Group), true - } -} - -func (c *AsgCache) Store(name string, asg *autoscaling.Group) { - c.cache.Store(name, CachedValue{asg, time.Now().Add(CacheTTL)}) -} - -func (c *AsgCache) Delete(name string) { - c.cache.Delete(name) -} - -type CachedValue struct { - val interface{} - expiration time.Time -} - -func (r *RollingUpgradeReconciler) SetMaxParallel(max int) { - if max >= 1 { - r.Log.Info(fmt.Sprintf("max parallel reconciles = %v", max)) - r.maxParallel = max - } -} - -func (r *RollingUpgradeReconciler) preDrainHelper(instanceID, nodeName string, ruObj *upgrademgrv1alpha1.RollingUpgrade) error { - return r.ScriptRunner.PreDrain(instanceID, nodeName, ruObj) -} - -// Operates on any scripts that were provided after the draining of the node. 
-// kubeCtlCall is provided as an argument to decouple the method from the actual kubectl call -func (r *RollingUpgradeReconciler) postDrainHelper(instanceID, - nodeName string, - ruObj *upgrademgrv1alpha1.RollingUpgrade, - nodeSteps map[string][]v1alpha1.NodeStepDuration, - inProcessingNodes map[string]*v1alpha1.NodeInProcessing, - mutex *sync.Mutex) error { - err := r.ScriptRunner.PostDrain(instanceID, nodeName, ruObj) - if err != nil { - return err - } - - r.info(ruObj, "Waiting for postDrainDelay", "postDrainDelay", ruObj.Spec.PostDrainDelaySeconds) - time.Sleep(time.Duration(ruObj.Spec.PostDrainDelaySeconds) * time.Second) - - ruObj.Status.NodeStep(inProcessingNodes, nodeSteps, ruObj.Spec.AsgName, nodeName, v1alpha1.NodeRotationPostWait, mutex) - - return r.ScriptRunner.PostWait(instanceID, nodeName, ruObj) - -} - -// DrainNode runs "kubectl drain" on the given node -// kubeCtlCall is provided as an argument to decouple the method from the actual kubectl call -func (r *RollingUpgradeReconciler) DrainNode(ruObj *upgrademgrv1alpha1.RollingUpgrade, - nodeName string, - instanceID string, - drainTimeout int, - nodeSteps map[string][]v1alpha1.NodeStepDuration, - inProcessingNodes map[string]*v1alpha1.NodeInProcessing, - mutex *sync.Mutex) error { - - ruObj.Status.NodeStep(inProcessingNodes, nodeSteps, ruObj.Spec.AsgName, nodeName, v1alpha1.NodeRotationPredrainScript, mutex) - - // Running kubectl drain node. - err := r.preDrainHelper(instanceID, nodeName, ruObj) - if err != nil { - return fmt.Errorf("%s: pre-drain script failed: %w", ruObj.NamespacedName(), err) - } - - errChan := make(chan error) - ctx := context.TODO() - var cancel context.CancelFunc - - // Add a context with timeout only if a valid drain timeout value is specified - // default value used for drain timeout is -1 - if drainTimeout >= 0 { - r.info(ruObj, "Creating a context with timeout", "drainTimeout", drainTimeout) - // Define a cancellation after drainTimeout - ctx, cancel = context.WithTimeout(ctx, time.Duration(drainTimeout)*time.Second) - defer cancel() - } else { - r.info(ruObj, "Skipped creating context with timeout.", "drainTimeout", drainTimeout) - } - - ruObj.Status.NodeStep(inProcessingNodes, nodeSteps, ruObj.Spec.AsgName, nodeName, v1alpha1.NodeRotationDrain, mutex) - - r.info(ruObj, "Invoking kubectl drain for the node", "nodeName", nodeName) - go r.CallKubectlDrain(nodeName, ruObj, errChan) - - // Listening to signals from the CallKubectlDrain go routine - select { - case <-ctx.Done(): - r.error(ruObj, ctx.Err(), "Kubectl drain timed out for node", "nodeName", nodeName) - if !ruObj.Spec.IgnoreDrainFailures { - return ctx.Err() - } - case err := <-errChan: - if err != nil && !ruObj.Spec.IgnoreDrainFailures { - r.error(ruObj, err, "Kubectl drain errored for node", "nodeName", nodeName) - return err - } - r.info(ruObj, "Kubectl drain completed for node", "nodeName", nodeName) - } - - ruObj.Status.NodeStep(inProcessingNodes, nodeSteps, ruObj.Spec.AsgName, nodeName, v1alpha1.NodeRotationPostdrainScript, mutex) - - return r.postDrainHelper(instanceID, nodeName, ruObj, nodeSteps, inProcessingNodes, mutex) -} - -// CallKubectlDrain runs the "kubectl drain" for a given node -// Node will be terminated even if pod eviction is not completed when the drain timeout is exceeded -func (r *RollingUpgradeReconciler) CallKubectlDrain(nodeName string, ruObj *upgrademgrv1alpha1.RollingUpgrade, errChan chan error) { - out, err := r.ScriptRunner.drainNode(nodeName, ruObj) - if err != nil { - if strings.Contains(out, "Error from 
server (NotFound): nodes") { - r.error(ruObj, err, "Not executing postDrainHelper. Node not found.", "output", out) - errChan <- nil - return - } - errChan <- fmt.Errorf("%s failed to drain: %w", ruObj.NamespacedName(), err) - return - } - errChan <- nil -} - -func (r *RollingUpgradeReconciler) WaitForDesiredInstances(ruObj *upgrademgrv1alpha1.RollingUpgrade) error { - var err error - var ieb *iebackoff.IEBackoff - for ieb, err = iebackoff.NewIEBackoff(WaiterMaxDelay, WaiterMinDelay, 0.5, WaiterMaxAttempts); err == nil; err = ieb.Next() { - err = r.populateAsg(ruObj) - if err != nil { - return err - } - - asg, err := r.GetAutoScalingGroup(ruObj.NamespacedName()) - if err != nil { - return fmt.Errorf("Unable to load ASG with name: %s", ruObj.Name) - } - - inServiceCount := getInServiceCount(asg.Instances) - if inServiceCount == aws.Int64Value(asg.DesiredCapacity) { - r.info(ruObj, "desired capacity is met", "inServiceCount", inServiceCount) - return nil - } - - r.info(ruObj, "new instance has not yet joined the scaling group") - } - return fmt.Errorf("%s: WaitForDesiredInstances timed out while waiting for instance to be added: %w", ruObj.NamespacedName(), err) -} - -// we put old instances in standby and then wait for new instances to be InService so that desired instances is met -func (r *RollingUpgradeReconciler) WaitForDesiredNodes(ruObj *upgrademgrv1alpha1.RollingUpgrade) error { - var err error - var ieb *iebackoff.IEBackoff - for ieb, err = iebackoff.NewIEBackoff(WaiterMaxDelay, WaiterMinDelay, 0.5, WaiterMaxAttempts); err == nil; err = ieb.Next() { - err = r.populateAsg(ruObj) - if err != nil { - return err - } - - err = r.populateNodeList(ruObj, r.generatedClient.CoreV1().Nodes()) - if err != nil { - r.error(ruObj, err, "unable to populate node list") - } - - asg, err := r.GetAutoScalingGroup(ruObj.NamespacedName()) - if err != nil { - return fmt.Errorf("Unable to load ASG with name: %s", ruObj.Name) - } - - // get list of inService instance IDs - inServiceInstanceIds := getInServiceIds(asg.Instances) - desiredCapacity := aws.Int64Value(asg.DesiredCapacity) - - // check all asg instances are ready nodes - var foundCount int64 = 0 - for _, node := range r.NodeList.Items { - if contains(inServiceInstanceIds, r.instanceId(node)) && isNodeReady(node) && isNodePassingReadinessGates(node, ruObj.Spec.ReadinessGates) { - foundCount++ - } - } - - if foundCount == desiredCapacity { - r.info(ruObj, "desired capacity is met", "inServiceCount", foundCount) - return nil - } - - r.info(ruObj, "new node is not yet ready") - } - return fmt.Errorf("%s: WaitForDesiredNodes timed out while waiting for nodes to join: %w", ruObj.NamespacedName(), err) -} - -// read aws instance id from nodes spec.providerID -func (r *RollingUpgradeReconciler) instanceId(node corev1.Node) string { - tokens := strings.Split(node.Spec.ProviderID, "/") - return tokens[len(tokens)-1] -} - -func (r *RollingUpgradeReconciler) WaitForTermination(ruObj *upgrademgrv1alpha1.RollingUpgrade, nodeName string, nodeInterface v1.NodeInterface) (bool, error) { - if nodeName == "" { - return true, nil - } - - started := time.Now() - for { - if time.Since(started) >= (time.Second * time.Duration(TerminationTimeoutSeconds)) { - r.info(ruObj, "WaitForTermination timed out while waiting for node to unjoin") - return false, nil - } - - _, err := nodeInterface.Get(nodeName, metav1.GetOptions{}) - if k8serrors.IsNotFound(err) { - r.info(ruObj, "node is unjoined from cluster, upgrade will proceed", "nodeName", nodeName) - break - } - - 
r.info(ruObj, "node is still joined to cluster, will wait and retry", - "nodeName", nodeName, "terminationSleepIntervalSeconds", TerminationSleepIntervalSeconds) - - time.Sleep(time.Duration(TerminationSleepIntervalSeconds) * time.Second) - } - return true, nil -} - -func (r *RollingUpgradeReconciler) GetAutoScalingGroup(namespacedName string) (*autoscaling.Group, error) { - val, ok := r.ruObjNameToASG.Load(namespacedName) - if !ok { - return &autoscaling.Group{}, fmt.Errorf("Unable to load ASG for %s from cache", namespacedName) - } - return val, nil -} - -// SetStandby sets the autoscaling instance to standby mode. -func (r *RollingUpgradeReconciler) SetStandby(ruObj *upgrademgrv1alpha1.RollingUpgrade, instanceID string) error { - r.info(ruObj, "Setting to stand-by", ruObj.Name, instanceID) - - asg, err := r.GetAutoScalingGroup(ruObj.NamespacedName()) - if err != nil { - return err - } - - instanceState, err := getInstanceStateInASG(asg, instanceID) - if err != nil { - r.info(ruObj, fmt.Sprintf("WARNING: %v", err)) - return nil - } - - if instanceState == autoscaling.LifecycleStateStandby { - return nil - } - - if !isInServiceLifecycleState(instanceState) { - r.info(ruObj, "Cannot set instance to stand-by, instance is in state", "instanceState", instanceState, "instanceID", instanceID) - return nil - } - - input := &autoscaling.EnterStandbyInput{ - AutoScalingGroupName: aws.String(ruObj.Spec.AsgName), - InstanceIds: aws.StringSlice([]string{instanceID}), - ShouldDecrementDesiredCapacity: aws.Bool(false), - } - - _, err = r.ASGClient.EnterStandby(input) - if err != nil { - r.error(ruObj, err, "Failed to enter standby", "instanceID", instanceID) - } - return nil -} - -// TerminateNode actually terminates the given node. -func (r *RollingUpgradeReconciler) TerminateNode(ruObj *upgrademgrv1alpha1.RollingUpgrade, - instanceID string, - nodeName string, - nodeSteps map[string][]v1alpha1.NodeStepDuration, - inProcessingNodes map[string]*v1alpha1.NodeInProcessing, - mutex *sync.Mutex) error { - - input := &autoscaling.TerminateInstanceInAutoScalingGroupInput{ - InstanceId: aws.String(instanceID), - ShouldDecrementDesiredCapacity: aws.Bool(false), - } - ruObj.Status.NodeStep(inProcessingNodes, nodeSteps, ruObj.Spec.AsgName, nodeName, v1alpha1.NodeRotationTerminate, mutex) - - var err error - var ieb *iebackoff.IEBackoff - for ieb, err = iebackoff.NewIEBackoff(WaiterMaxDelay, WaiterMinDelay, 0.5, WaiterMaxAttempts); err == nil; err = ieb.Next() { - _, err := r.ASGClient.TerminateInstanceInAutoScalingGroup(input) - if err == nil { - break - } - if aerr, ok := err.(awserr.Error); ok { - if strings.Contains(aerr.Message(), "not found") { - r.info(ruObj, "Instance not found. 
Moving on", "instanceID", instanceID) - return nil - } - switch aerr.Code() { - case autoscaling.ErrCodeScalingActivityInProgressFault: - r.error(ruObj, aerr, autoscaling.ErrCodeScalingActivityInProgressFault, "instanceID", instanceID) - case autoscaling.ErrCodeResourceContentionFault: - r.error(ruObj, aerr, autoscaling.ErrCodeResourceContentionFault, "instanceID", instanceID) - default: - r.error(ruObj, aerr, aerr.Code(), "instanceID", instanceID) - return err - } - } - } - if err != nil { - return err - } - r.info(ruObj, "Instance terminated.", "instanceID", instanceID) - r.info(ruObj, "starting post termination sleep", "instanceID", instanceID, "nodeIntervalSeconds", ruObj.Spec.NodeIntervalSeconds) - time.Sleep(time.Duration(ruObj.Spec.NodeIntervalSeconds) * time.Second) - ruObj.Status.NodeStep(inProcessingNodes, nodeSteps, ruObj.Spec.AsgName, nodeName, v1alpha1.NodeRotationPostTerminate, mutex) - return r.ScriptRunner.PostTerminate(instanceID, nodeName, ruObj) -} - -func (r *RollingUpgradeReconciler) getNodeName(i *autoscaling.Instance, nodeList *corev1.NodeList, ruObj *upgrademgrv1alpha1.RollingUpgrade) string { - node := r.getNodeFromAsg(i, nodeList, ruObj) - if node == nil { - r.info(ruObj, "Node name for instance not found", "instanceID", *i.InstanceId) - return "" - } - return node.Name -} - -func (r *RollingUpgradeReconciler) getNodeFromAsg(i *autoscaling.Instance, nodeList *corev1.NodeList, ruObj *upgrademgrv1alpha1.RollingUpgrade) *corev1.Node { - for _, n := range nodeList.Items { - tokens := strings.Split(n.Spec.ProviderID, "/") - justID := tokens[len(tokens)-1] - if *i.InstanceId == justID { - r.info(ruObj, "Found instance", "instanceID", justID, "instanceName", n.Name) - return &n - } - } - - r.info(ruObj, "Node for instance not found", "instanceID", *i.InstanceId) - return nil -} - -func (r *RollingUpgradeReconciler) populateAsg(ruObj *upgrademgrv1alpha1.RollingUpgrade) error { - // if value is still in cache, do nothing. - if !r.ruObjNameToASG.IsExpired(ruObj.NamespacedName()) { - return nil - } - - input := &autoscaling.DescribeAutoScalingGroupsInput{ - AutoScalingGroupNames: []*string{ - aws.String(ruObj.Spec.AsgName), - }, - } - result, err := r.ASGClient.DescribeAutoScalingGroups(input) - if err != nil { - r.error(ruObj, err, "Failed to describe autoscaling group") - return fmt.Errorf("%s: failed to describe autoscaling group: %w", ruObj.NamespacedName(), err) - } - - if len(result.AutoScalingGroups) == 0 { - r.info(ruObj, "%s: No ASG found with name %s!\n", ruObj.Name, ruObj.Spec.AsgName) - return fmt.Errorf("%s: no ASG found", ruObj.NamespacedName()) - } else if len(result.AutoScalingGroups) > 1 { - r.info(ruObj, "%s: Too many asgs found with name %d!\n", ruObj.Name, len(result.AutoScalingGroups)) - return fmt.Errorf("%s: Too many ASGs: %d", ruObj.NamespacedName(), len(result.AutoScalingGroups)) - } - - asg := result.AutoScalingGroups[0] - r.ruObjNameToASG.Store(ruObj.NamespacedName(), asg) - - return nil -} - -func (r *RollingUpgradeReconciler) populateLaunchTemplates(ruObj *upgrademgrv1alpha1.RollingUpgrade) error { - launchTemplates := []*ec2.LaunchTemplate{} - err := r.EC2Client.DescribeLaunchTemplatesPages(&ec2.DescribeLaunchTemplatesInput{}, func(page *ec2.DescribeLaunchTemplatesOutput, lastPage bool) bool { - launchTemplates = append(launchTemplates, page.LaunchTemplates...) 
- return page.NextToken != nil - }) - if err != nil { - r.error(ruObj, err, "Failed to populate launch template list") - return fmt.Errorf("failed to populate launch template list for %s: %w", ruObj.NamespacedName(), err) - } - r.LaunchTemplates = launchTemplates - return nil -} - -// store all nodes in a cache to avoid fetching them multiple times -func (r *RollingUpgradeReconciler) populateNodeList(ruObj *upgrademgrv1alpha1.RollingUpgrade, nodeInterface v1.NodeInterface) error { - nodeList, err := nodeInterface.List(metav1.ListOptions{}) - if err != nil { - msg := "Failed to get all nodes in the cluster: " + err.Error() - r.info(ruObj, msg) - return fmt.Errorf("%s: Failed to get all nodes in the cluster: %w", ruObj.NamespacedName(), err) - } - r.NodeList = nodeList - return nil -} - -func (r *RollingUpgradeReconciler) getInProgressInstances(instances []*autoscaling.Instance) ([]*autoscaling.Instance, error) { - var inProgressInstances []*autoscaling.Instance - taggedInstances, err := getTaggedInstances(EC2StateTagKey, "in-progress", r.EC2Client) - if err != nil { - return inProgressInstances, err - } - for _, instance := range instances { - if contains(taggedInstances, aws.StringValue(instance.InstanceId)) { - inProgressInstances = append(inProgressInstances, instance) - } - } - return inProgressInstances, nil -} - -// runRestack performs rollout of new nodes. -// returns number of processed instances and optional error. -func (r *RollingUpgradeReconciler) runRestack(ctx *context.Context, ruObj *upgrademgrv1alpha1.RollingUpgrade) (int, error) { - err := r.populateAsg(ruObj) - if err != nil { - return 0, fmt.Errorf("%s: Unable to populate the ASG object: %w", ruObj.NamespacedName(), err) - } - - asg, err := r.GetAutoScalingGroup(ruObj.NamespacedName()) - if err != nil { - return 0, fmt.Errorf("Unable to load ASG with name: %s", ruObj.Name) - } - - r.info(ruObj, "Nodes in ASG that *might* need to be updated", "asgName", *asg.AutoScalingGroupName, "asgSize", len(asg.Instances)) - - totalNodes := len(asg.Instances) - // No further processing is required if ASG doesn't have an instance running - if totalNodes == 0 { - r.info(ruObj, fmt.Sprintf("Total nodes needing update for %s is 0. Restack complete.", *asg.AutoScalingGroupName)) - return 0, nil - } - - nodeSelector := getNodeSelector(asg, ruObj) - - r.inProcessASGs.Store(*asg.AutoScalingGroupName, "running") - r.ClusterState.initializeAsg(*asg.AutoScalingGroupName, asg.Instances) - defer r.ClusterState.deleteAllInstancesInAsg(*asg.AutoScalingGroupName) - - launchDefinition := NewLaunchDefinition(asg) - - processedInstances := 0 - - inProgress, err := r.getInProgressInstances(asg.Instances) - if err != nil { - r.error(ruObj, err, "Failed to acquire in-progress instances") - } - - for processedInstances < totalNodes { - var instances []*autoscaling.Instance - if len(inProgress) == 0 { - // Fetch instances to update from node selector - instances = nodeSelector.SelectNodesForRestack(r.ClusterState) - r.info(ruObj, fmt.Sprintf("selected instances for rotation: %+v", instances)) - } else { - // Prefer in progress instances over new ones - instances = inProgress - inProgress = []*autoscaling.Instance{} - r.info(ruObj, fmt.Sprintf("found in progress instances: %+v", instances)) - } - - if instances == nil { - errorMessage := fmt.Sprintf( - "No instances available for update across all AZ's for %s. 
Processed %d of total %d instances", - ruObj.Name, processedInstances, totalNodes) - // No instances fetched from any AZ, stop processing - r.info(ruObj, errorMessage) - - // this should never be case, return error - return processedInstances, fmt.Errorf(errorMessage) - } - - // update the instances - err := r.UpdateInstances(ctx, ruObj, instances, launchDefinition) - processedInstances += len(instances) - if err != nil { - return processedInstances, err - } - } - return processedInstances, nil -} - -func (r *RollingUpgradeReconciler) finishExecution(err error, nodesProcessed int, ctx *context.Context, ruObj *upgrademgrv1alpha1.RollingUpgrade) { - var level string - var finalStatus string - - if err == nil { - finalStatus = upgrademgrv1alpha1.StatusComplete - level = EventLevelNormal - r.info(ruObj, "Marked object as", "finalStatus", finalStatus) - common.SetMetricRollupCompleted(ruObj.Name) - } else { - finalStatus = upgrademgrv1alpha1.StatusError - level = EventLevelWarning - r.error(ruObj, err, "Marked object as", "finalStatus", finalStatus) - common.SetMetricRollupFailed(ruObj.Name) - } - - endTime := time.Now() - ruObj.Status.EndTime = endTime.Format(time.RFC3339) - ruObj.Status.CurrentStatus = finalStatus - ruObj.Status.NodesProcessed = nodesProcessed - common.SetNodesProcessedMetric(ruObj.Spec.AsgName, ruObj.Status.NodesProcessed) - - ruObj.Status.Conditions = append(ruObj.Status.Conditions, - upgrademgrv1alpha1.RollingUpgradeCondition{ - Type: upgrademgrv1alpha1.UpgradeComplete, - Status: corev1.ConditionTrue, - }) - - startTime, err := time.Parse(time.RFC3339, ruObj.Status.StartTime) - if err != nil { - r.info(ruObj, "Failed to calculate totalProcessingTime") - } else { - var duration = endTime.Sub(startTime) - ruObj.Status.TotalProcessingTime = duration.String() - common.TotalProcessingTime(ruObj.Spec.AsgName, duration) - } - // end event - - r.createK8sV1Event(ruObj, EventReasonRUFinished, level, map[string]string{ - "status": finalStatus, - "asgName": ruObj.Spec.AsgName, - "strategy": string(ruObj.Spec.Strategy.Type), - "info": fmt.Sprintf("Rolling Upgrade as finished (status=%s)", finalStatus), - }) - - MarkObjForCleanup(ruObj) - if err := r.Status().Update(*ctx, ruObj); err != nil { - // Check if the err is "StorageError: invalid object". If so, the object was deleted... - if strings.Contains(err.Error(), "StorageError: invalid object") { - r.info(ruObj, "Object most likely deleted") - } else { - r.error(ruObj, err, "failed to update status") - } - } - - r.ClusterState.deleteAllInstancesInAsg(ruObj.Spec.AsgName) - r.info(ruObj, "Deleted the entries of ASG in the cluster store", "asgName", ruObj.Spec.AsgName) - r.inProcessASGs.Delete(ruObj.Spec.AsgName) - r.admissionMap.Delete(ruObj.NamespacedName()) - r.info(ruObj, "Deleted from admission map", "admissionMap", &r.admissionMap) -} - -// Process actually performs the ec2-instance restacking. 
-func (r *RollingUpgradeReconciler) Process(ctx *context.Context, - ruObj *upgrademgrv1alpha1.RollingUpgrade) { - - if ruObj.Status.CurrentStatus == upgrademgrv1alpha1.StatusComplete || - ruObj.Status.CurrentStatus == upgrademgrv1alpha1.StatusError { - r.info(ruObj, "No more processing", "currentStatus", ruObj.Status.CurrentStatus) - - if exists := ruObj.ObjectMeta.Annotations[JanitorAnnotation]; exists == "" { - r.info(ruObj, "Marking object for deletion") - MarkObjForCleanup(ruObj) - } - - r.admissionMap.Delete(ruObj.NamespacedName()) - r.info(ruObj, "Deleted object from admission map") - return - } - // start event - r.createK8sV1Event(ruObj, EventReasonRUStarted, EventLevelNormal, map[string]string{ - "status": "started", - "asgName": ruObj.Spec.AsgName, - "strategy": string(ruObj.Spec.Strategy.Type), - "msg": "Rolling Upgrade has started", - }) - r.CacheConfig.FlushCache("autoscaling") - err := r.populateAsg(ruObj) - if err != nil { - r.finishExecution(err, 0, ctx, ruObj) - return - } - - //TODO(shri): Ensure that no node is Unschedulable at this time. - err = r.populateNodeList(ruObj, r.generatedClient.CoreV1().Nodes()) - if err != nil { - r.finishExecution(err, 0, ctx, ruObj) - return - } - - if err := r.populateLaunchTemplates(ruObj); err != nil { - r.finishExecution(err, 0, ctx, ruObj) - return - } - - asg, err := r.GetAutoScalingGroup(ruObj.NamespacedName()) - if err != nil { - r.error(ruObj, err, "Unable to load ASG for rolling upgrade") - r.finishExecution(err, 0, ctx, ruObj) - return - } - - // Update the CR with some basic info before staring the restack. - ruObj.Status.StartTime = time.Now().Format(time.RFC3339) - ruObj.Status.CurrentStatus = upgrademgrv1alpha1.StatusRunning - ruObj.Status.NodesProcessed = 0 - ruObj.Status.TotalNodes = int(aws.Int64Value(asg.DesiredCapacity)) - - common.SetTotalNodesMetric(ruObj.Spec.AsgName, ruObj.Status.TotalNodes) - common.SetNodesProcessedMetric(ruObj.Spec.AsgName, 0) - - common.SetMetricRollupInitOrRunning(ruObj.Name) - - if err := r.Status().Update(*ctx, ruObj); err != nil { - r.error(ruObj, err, "failed to update status") - } - - // Run the restack that actually performs the rolling update. - nodesProcessed, err := r.runRestack(ctx, ruObj) - if err != nil { - r.error(ruObj, err, "Failed to runRestack") - r.finishExecution(err, nodesProcessed, ctx, ruObj) - return - } - - //Validation step: check if all the nodes have the latest launchconfig. - r.info(ruObj, "Validating the launch definition of nodes and ASG") - if err := r.validateNodesLaunchDefinition(ruObj); err != nil { - r.error(ruObj, err, "Launch definition validation failed") - r.finishExecution(err, nodesProcessed, ctx, ruObj) - return - } - - // no error -> report success - r.finishExecution(nil, nodesProcessed, ctx, ruObj) -} - -//Check if ec2Instances and the ASG have same launch config. -func (r *RollingUpgradeReconciler) validateNodesLaunchDefinition(ruObj *upgrademgrv1alpha1.RollingUpgrade) error { - //Get ASG launch config - var err error - err = r.populateAsg(ruObj) - if err != nil { - return fmt.Errorf("%s: Unable to populate the ASG object: %w", ruObj.NamespacedName(), err) - } - asg, err := r.GetAutoScalingGroup(ruObj.NamespacedName()) - if err != nil { - return fmt.Errorf("%s: Unable to load ASG with name: %w", ruObj.NamespacedName(), err) - } - launchDefinition := NewLaunchDefinition(asg) - launchConfigASG, launchTemplateASG := launchDefinition.launchConfigurationName, launchDefinition.launchTemplate - - //Get ec2 instances and their launch configs. 
- ec2instances := asg.Instances - for _, ec2Instance := range ec2instances { - ec2InstanceID, ec2InstanceLaunchConfig, ec2InstanceLaunchTemplate := ec2Instance.InstanceId, ec2Instance.LaunchConfigurationName, ec2Instance.LaunchTemplate - if aws.StringValue(ec2Instance.LifecycleState) != InService { - continue - } - if aws.StringValue(launchConfigASG) != aws.StringValue(ec2InstanceLaunchConfig) { - return fmt.Errorf("launch config mismatch, %s instance config - %s, does not match the asg config", aws.StringValue(ec2InstanceID), aws.StringValue(ec2InstanceLaunchConfig)) - } else if launchTemplateASG != nil && ec2InstanceLaunchTemplate != nil { - if aws.StringValue(launchTemplateASG.LaunchTemplateId) != aws.StringValue(ec2InstanceLaunchTemplate.LaunchTemplateId) { - return fmt.Errorf("launch template mismatch, %s instance template - %s, does not match the asg template", aws.StringValue(ec2InstanceID), aws.StringValue(ec2InstanceLaunchTemplate.LaunchTemplateId)) - } - } - } - return nil -} - -// MarkObjForCleanup sets the annotation on the given object for deletion. -func MarkObjForCleanup(ruObj *upgrademgrv1alpha1.RollingUpgrade) { - if ruObj.ObjectMeta.Annotations == nil { - ruObj.ObjectMeta.Annotations = map[string]string{} - } - - switch ruObj.Status.CurrentStatus { - case upgrademgrv1alpha1.StatusComplete: - ruObj.ObjectMeta.Annotations[JanitorAnnotation] = ClearCompletedFrequency - case upgrademgrv1alpha1.StatusError: - ruObj.ObjectMeta.Annotations[JanitorAnnotation] = ClearErrorFrequency - } + logr.Logger + Scheme *runtime.Scheme + AdmissionMap sync.Map + CacheConfig *cache.Config + EventWriter *kubeprovider.EventWriter + maxParallel int + ScriptRunner ScriptRunner + Auth *RollingUpgradeAuthenticator + DrainGroupMapper *sync.Map + DrainErrorMapper *sync.Map + ClusterNodesMap *sync.Map + ReconcileMap *sync.Map + DrainTimeout int + IgnoreDrainFailures bool +} + +// RollingUpgradeAuthenticator has the clients for providers +type RollingUpgradeAuthenticator struct { + *awsprovider.AmazonClientSet + *kubeprovider.KubernetesClientSet } // +kubebuilder:rbac:groups=upgrademgr.keikoproj.io,resources=rollingupgrades,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=upgrademgr.keikoproj.io,resources=rollingupgrades/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=core,resources=nodes,verbs=get;list;patch +// +kubebuilder:rbac:groups=core,resources=nodes,verbs=get;list;patch;watch // +kubebuilder:rbac:groups=core,resources=pods,verbs=list // +kubebuilder:rbac:groups=core,resources=events,verbs=create // +kubebuilder:rbac:groups=core,resources=pods/eviction,verbs=create // +kubebuilder:rbac:groups=extensions;apps,resources=daemonsets;replicasets;statefulsets,verbs=get // +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get -// Reconcile reads that state of the cluster for a RollingUpgrade object and makes changes based on the state read +// reconcile reads that state of the cluster for a RollingUpgrade object and makes changes based on the state read // and the details in the RollingUpgrade.Spec -func (r *RollingUpgradeReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { - ctx := context.Background() - - // Fetch the RollingUpgrade instance - ruObj := &upgrademgrv1alpha1.RollingUpgrade{} - err := r.Get(ctx, req.NamespacedName, ruObj) +func (r *RollingUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + r.Info("***Reconciling***") + rollingUpgrade := &v1alpha1.RollingUpgrade{} + err := r.Get(ctx, 
req.NamespacedName, rollingUpgrade) if err != nil { - if k8serrors.IsNotFound(err) { - // Object not found, return. Created objects are automatically garbage collected. - // For additional cleanup logic use finalizers. - r.admissionMap.Delete(req.NamespacedName) - r.info(ruObj, "Deleted object from map", "name", req.NamespacedName) + if kerrors.IsNotFound(err) { + r.AdmissionMap.Delete(req.NamespacedName.String()) + r.Info("rolling upgrade resource not found, deleted object from admission map", "name", req.NamespacedName) return ctrl.Result{}, nil } - // Error reading the object - requeue the request. return ctrl.Result{}, err } - // If the resource is being deleted, remove it from the admissionMap - if !ruObj.DeletionTimestamp.IsZero() { - r.info(ruObj, "Object is being deleted. No more processing") - r.admissionMap.Delete(ruObj.NamespacedName()) - r.ruObjNameToASG.Delete(ruObj.NamespacedName()) - r.info(ruObj, "Deleted object from admission map") + // if the resource is being deleted, remove it from the admissionMap + if !rollingUpgrade.DeletionTimestamp.IsZero() { + r.AdmissionMap.Delete(rollingUpgrade.NamespacedName()) + r.Info("rolling upgrade deleted", "name", rollingUpgrade.NamespacedName()) return reconcile.Result{}, nil } - // set the state of instances in the ASG to new in the cluster store - _, exists := r.inProcessASGs.Load(ruObj.Spec.AsgName) - if exists { - r.info(ruObj, "ASG "+ruObj.Spec.AsgName+" is being processed. Requeuing") - return reconcile.Result{Requeue: true, RequeueAfter: time.Duration(60) * time.Second}, nil + // stop processing upgrades which are in finite state + currentStatus := rollingUpgrade.CurrentStatus() + if common.ContainsEqualFold(v1alpha1.FiniteStates, currentStatus) { + r.AdmissionMap.Delete(rollingUpgrade.NamespacedName()) + r.Info("rolling upgrade ended", "name", rollingUpgrade.NamespacedName(), "status", currentStatus) + return reconcile.Result{}, nil } - // Setting default values for the Strategy in rollup object - r.setDefaultsForRollingUpdateStrategy(ruObj) - r.info(ruObj, "Default strategy settings applied.", "updateStrategy", ruObj.Spec.Strategy) - - err = r.validateRollingUpgradeObj(ruObj) - if err != nil { - r.error(ruObj, err, "Validation failed") + if ok, err := rollingUpgrade.Validate(); !ok { return reconcile.Result{}, err } - result, ok := r.admissionMap.Load(ruObj.NamespacedName()) - if ok { - if result == "processing" { - r.info(ruObj, "Found obj in map:", "name", ruObj.NamespacedName()) - r.info(ruObj, "Object already being processed", "name", ruObj.NamespacedName()) - } else { - r.info(ruObj, "Sync map with invalid entry for ", "name", ruObj.NamespacedName()) - } - } else { - r.info(ruObj, "Adding obj to map: ", "name", ruObj.NamespacedName()) - r.admissionMap.Store(ruObj.NamespacedName(), "processing") - go r.Process(&ctx, ruObj) - } + // defer a status update on the resource + defer r.UpdateStatus(rollingUpgrade) - return ctrl.Result{}, nil -} + var ( + scalingGroupName = rollingUpgrade.ScalingGroupName() + inProgress bool + ) -// SetupWithManager creates a new manager. -func (r *RollingUpgradeReconciler) SetupWithManager(mgr ctrl.Manager) error { - r.generatedClient = kubernetes.NewForConfigOrDie(mgr.GetConfig()) - return ctrl.NewControllerManagedBy(mgr). - For(&upgrademgrv1alpha1.RollingUpgrade{}). - WithOptions(controller.Options{MaxConcurrentReconciles: r.maxParallel}). 
- Complete(r) -} - -func (r *RollingUpgradeReconciler) setStateTag(ruObj *upgrademgrv1alpha1.RollingUpgrade, instanceID string, state string) error { - r.info(ruObj, "setting instance state", "instanceID", instanceID, "instanceState", state) - return tagEC2instance(instanceID, EC2StateTagKey, state, r.EC2Client) -} - -// validateRollingUpgradeObj validates rollup object for the type, maxUnavailable and drainTimeout -func (r *RollingUpgradeReconciler) validateRollingUpgradeObj(ruObj *upgrademgrv1alpha1.RollingUpgrade) error { - strategy := ruObj.Spec.Strategy - - var nilStrategy = upgrademgrv1alpha1.UpdateStrategy{} - if strategy == nilStrategy { - return nil - } - // validating the maxUnavailable value - if strategy.MaxUnavailable.Type == intstr.Int { - if strategy.MaxUnavailable.IntVal <= 0 { - err := fmt.Errorf("%s: Invalid value for maxUnavailable - %d", - ruObj.Name, strategy.MaxUnavailable.IntVal) - r.error(ruObj, err, "Invalid value for maxUnavailable", "value", strategy.MaxUnavailable.IntVal) - return err - } - } else if strategy.MaxUnavailable.Type == intstr.String { - strVal := strategy.MaxUnavailable.StrVal - intValue, _ := strconv.Atoi(strings.Trim(strVal, "%")) - if intValue <= 0 || intValue > 100 { - err := fmt.Errorf("%s: Invalid value for maxUnavailable - %s", - ruObj.Name, strategy.MaxUnavailable.StrVal) - r.error(ruObj, err, "Invalid value for maxUnavailable", "value", strategy.MaxUnavailable.StrVal) - return err - } - } - - // validating the strategy type - if strategy.Type != upgrademgrv1alpha1.RandomUpdateStrategy && - strategy.Type != upgrademgrv1alpha1.UniformAcrossAzUpdateStrategy { - err := fmt.Errorf("%s: Invalid value for strategy type - %s", ruObj.NamespacedName(), strategy.Type) - r.error(ruObj, err, "Invalid value for strategy type", "value", strategy.Type) - return err - } - return nil -} - -// setDefaultsForRollingUpdateStrategy sets the default values for type, maxUnavailable and drainTimeout -func (r *RollingUpgradeReconciler) setDefaultsForRollingUpdateStrategy(ruObj *upgrademgrv1alpha1.RollingUpgrade) { - if ruObj.Spec.Strategy.Type == "" { - ruObj.Spec.Strategy.Type = upgrademgrv1alpha1.RandomUpdateStrategy + // at any given point in time, there should be only one reconcile operation running per ASG + if _, present := r.ReconcileMap.LoadOrStore(rollingUpgrade.NamespacedName(), scalingGroupName); present { + r.Info("a reconcile operation is already in progress for this ASG, requeuing", "scalingGroup", scalingGroupName, "name", rollingUpgrade.NamespacedName()) + return ctrl.Result{RequeueAfter: v1alpha1.DefaultRequeueTime}, nil } - if ruObj.Spec.Strategy.Mode == "" { - // default to lazy mode - ruObj.Spec.Strategy.Mode = upgrademgrv1alpha1.UpdateStrategyModeLazy - } - // Set default max unavailable to 1. 
- if ruObj.Spec.Strategy.MaxUnavailable.Type == intstr.Int && ruObj.Spec.Strategy.MaxUnavailable.IntVal == 0 { - ruObj.Spec.Strategy.MaxUnavailable.IntVal = 1 - } - if ruObj.Spec.Strategy.DrainTimeout == 0 { - ruObj.Spec.Strategy.DrainTimeout = -1 - } -} - -type UpdateInstancesError struct { - InstanceUpdateErrors []error -} -func (error UpdateInstancesError) Error() string { - return fmt.Sprintf("Error updating instances, ErrorCount: %d, Errors: %v", - len(error.InstanceUpdateErrors), error.InstanceUpdateErrors) -} - -func NewUpdateInstancesError(instanceUpdateErrors []error) *UpdateInstancesError { - return &UpdateInstancesError{InstanceUpdateErrors: instanceUpdateErrors} -} - -func (r *RollingUpgradeReconciler) UpdateInstances(ctx *context.Context, - ruObj *upgrademgrv1alpha1.RollingUpgrade, - instances []*autoscaling.Instance, - launchDefinition *launchDefinition) error { - - totalNodes := len(instances) - if totalNodes == 0 { - return nil - } - - //A map to retain the steps for multiple nodes - nodeSteps := make(map[string][]v1alpha1.NodeStepDuration) - - inProcessingNodes := make(map[string]*v1alpha1.NodeInProcessing) - - mutex := &sync.Mutex{} - - ch := make(chan error) - - for _, instance := range instances { - // log it before we start updating the instance - r.createK8sV1Event(ruObj, EventReasonRUInstanceStarted, EventLevelNormal, map[string]string{ - "status": "in-progress", - "asgName": ruObj.Spec.AsgName, - "strategy": string(ruObj.Spec.Strategy.Type), - "msg": fmt.Sprintf("Started Updating Instance %s, in AZ: %s", *instance.InstanceId, *instance.AvailabilityZone), - }) - go r.UpdateInstance(ctx, ruObj, instance, launchDefinition, ch, nodeSteps, inProcessingNodes, mutex) - } - - // wait for upgrades to complete - nodesProcessed := 0 - var instanceUpdateErrors []error - - for err := range ch { - nodesProcessed++ - switch err { - case nil: - // do nothing - default: - instanceUpdateErrors = append(instanceUpdateErrors, err) - } - // log the event - r.createK8sV1Event(ruObj, EventReasonRUInstanceFinished, EventLevelNormal, map[string]string{ - "status": "in-progress", - "asgName": ruObj.Spec.AsgName, - "strategy": string(ruObj.Spec.Strategy.Type), - "msg": fmt.Sprintf("Finished Updating Instance %d/%d (Errors=%d)", nodesProcessed, totalNodes, len(instanceUpdateErrors)), - }) - // break if we are done with all the nodes - if nodesProcessed == totalNodes { - break + // handle condition where multiple resources submitted targeting the same scaling group by requeing + r.AdmissionMap.Range(func(k, v interface{}) bool { + val := v.(string) + resource := k.(string) + if strings.EqualFold(val, scalingGroupName) && !strings.EqualFold(resource, rollingUpgrade.NamespacedName()) { + r.Info("object already being processed by existing resource", "resource", resource, "scalingGroup", scalingGroupName, "name", rollingUpgrade.NamespacedName()) + inProgress = true + return false } - } - - ruObj.Status.UpdateStatistics(nodeSteps) - ruObj.Status.UpdateLastBatchNodes(inProcessingNodes) - - if len(instanceUpdateErrors) > 0 { - return NewUpdateInstancesError(instanceUpdateErrors) - } - return nil -} - -func (r *RollingUpgradeReconciler) UpdateInstanceEager( - ruObj *upgrademgrv1alpha1.RollingUpgrade, - nodeName, - targetInstanceID string, - nodeSteps map[string][]v1alpha1.NodeStepDuration, - inProcessingNodes map[string]*v1alpha1.NodeInProcessing, - mutex *sync.Mutex) error { - - // Set instance to standby - err := r.SetStandby(ruObj, targetInstanceID) - if err != nil { - return err - } - - 
ruObj.Status.NodeStep(inProcessingNodes, nodeSteps, ruObj.Spec.AsgName, nodeName, v1alpha1.NodeRotationDesiredNodeReady, mutex) - - // Wait for new instance to be created - err = r.WaitForDesiredInstances(ruObj) - if err != nil { - return err - } - - // Wait for in-service nodes to be ready and match desired - err = r.WaitForDesiredNodes(ruObj) - if err != nil { - return err - } - - // Drain and wait for draining node. - return r.DrainTerminate(ruObj, nodeName, targetInstanceID, nodeSteps, inProcessingNodes, mutex) -} - -func (r *RollingUpgradeReconciler) DrainTerminate( - ruObj *upgrademgrv1alpha1.RollingUpgrade, - nodeName, - targetInstanceID string, - nodeSteps map[string][]v1alpha1.NodeStepDuration, - inProcessingNodes map[string]*v1alpha1.NodeInProcessing, - mutex *sync.Mutex) error { - - // Drain and wait for draining node. - if nodeName != "" { - if err := r.DrainNode(ruObj, nodeName, targetInstanceID, ruObj.Spec.Strategy.DrainTimeout, nodeSteps, inProcessingNodes, mutex); err != nil { - return err - } - ruObj.Status.LastNodeDrainTime = &metav1.Time{Time: time.Now()} - } - - // Terminate instance. - err := r.TerminateNode(ruObj, targetInstanceID, nodeName, nodeSteps, inProcessingNodes, mutex) - if err != nil { - return err - } - - ruObj.Status.LastNodeTerminationTime = &metav1.Time{Time: time.Now()} - ruObj.Status.NodeStep(inProcessingNodes, nodeSteps, ruObj.Spec.AsgName, nodeName, v1alpha1.NodeRotationCompleted, mutex) - - return nil -} - -// UpdateInstance runs the rolling upgrade on one instance from an autoscaling group -func (r *RollingUpgradeReconciler) UpdateInstance(ctx *context.Context, - ruObj *upgrademgrv1alpha1.RollingUpgrade, - i *autoscaling.Instance, - launchDefinition *launchDefinition, - ch chan error, - nodeSteps map[string][]v1alpha1.NodeStepDuration, - inProcessingNodes map[string]*v1alpha1.NodeInProcessing, - mutex *sync.Mutex) { - targetInstanceID := aws.StringValue(i.InstanceId) - // If an instance was marked as "in-progress" in ClusterState, it has to be marked - // completed so that it can get considered again in a subsequent rollup CR. - defer r.ClusterState.markUpdateCompleted(targetInstanceID) - - // Check if the rollingupgrade object still exists - _, ok := r.admissionMap.Load(ruObj.NamespacedName()) - if !ok { - r.info(ruObj, "Object either force completed or deleted. Ignoring node update") - ruObj.Status.NodesProcessed = ruObj.Status.NodesProcessed + 1 - common.SetNodesProcessedMetric(ruObj.Spec.AsgName, ruObj.Status.NodesProcessed) - ch <- nil - return - } - - // If the running node has the same launchconfig as the asg, - // there is no need to refresh it. 
- if !r.requiresRefresh(ruObj, i, launchDefinition) { - ruObj.Status.NodesProcessed = ruObj.Status.NodesProcessed + 1 - common.SetNodesProcessedMetric(ruObj.Spec.AsgName, ruObj.Status.NodesProcessed) - if err := r.Status().Update(*ctx, ruObj); err != nil { - r.error(ruObj, err, "failed to update status") - } - ch <- nil - return - } - - nodeName := r.getNodeName(i, r.NodeList, ruObj) + return true + }) - // set the EC2 tag indicating the state to in-progress - err := r.setStateTag(ruObj, targetInstanceID, "in-progress") - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == "InvalidInstanceID.NotFound" { - ch <- nil - return - } - } - ch <- err - return + if inProgress { + // requeue any resources which are already being processed by a different resource, until the resource is completed/deleted + return ctrl.Result{RequeueAfter: v1alpha1.DefaultRequeueTime}, nil } - mode := ruObj.Spec.Strategy.Mode.String() - if strings.ToLower(mode) == upgrademgrv1alpha1.UpdateStrategyModeEager.String() { - r.info(ruObj, "starting replacement with eager mode", "mode", mode) - //Add statistics - ruObj.Status.NodeStep(inProcessingNodes, nodeSteps, ruObj.Spec.AsgName, nodeName, v1alpha1.NodeRotationKickoff, mutex) - err = r.UpdateInstanceEager(ruObj, nodeName, targetInstanceID, nodeSteps, inProcessingNodes, mutex) - } else if strings.ToLower(mode) == upgrademgrv1alpha1.UpdateStrategyModeLazy.String() { - r.info(ruObj, "starting replacement with lazy mode", "mode", mode) - //Add statistics - ruObj.Status.NodeStep(inProcessingNodes, nodeSteps, ruObj.Spec.AsgName, nodeName, v1alpha1.NodeRotationKickoff, mutex) - err = r.DrainTerminate(ruObj, nodeName, targetInstanceID, nodeSteps, inProcessingNodes, mutex) + // store the rolling upgrade in admission map + if _, present := r.AdmissionMap.LoadOrStore(rollingUpgrade.NamespacedName(), scalingGroupName); !present { + r.Info("admitted new rolling upgrade", "scalingGroup", scalingGroupName, "update strategy", rollingUpgrade.Spec.Strategy, "name", rollingUpgrade.NamespacedName()) + r.CacheConfig.FlushCache("autoscaling") } else { - err = fmt.Errorf("%s: unhandled strategy mode: %s", ruObj.NamespacedName(), mode) - } - - if err != nil { - ch <- err - return - } + r.Info("operating on existing rolling upgrade", "scalingGroup", scalingGroupName, "update strategy", rollingUpgrade.Spec.Strategy, "name", rollingUpgrade.NamespacedName()) + } + rollingUpgrade.SetCurrentStatus(v1alpha1.StatusInit) + common.SetMetricRollupInitOrRunning(rollingUpgrade.Name) + + // setup the RollingUpgradeContext needed for node rotations. 
+ drainGroup, _ := r.DrainGroupMapper.LoadOrStore(rollingUpgrade.NamespacedName(), &sync.WaitGroup{}) + drainErrs, _ := r.DrainErrorMapper.LoadOrStore(rollingUpgrade.NamespacedName(), make(chan error)) + + rollupCtx := &RollingUpgradeContext{ + Logger: r.Logger, + Auth: r.Auth, + ScriptRunner: r.ScriptRunner, + DrainManager: &DrainManager{ + DrainErrors: drainErrs.(chan error), + DrainGroup: drainGroup.(*sync.WaitGroup), + }, + RollingUpgrade: rollingUpgrade, + metricsMutex: &sync.Mutex{}, - unjoined, err := r.WaitForTermination(ruObj, nodeName, r.generatedClient.CoreV1().Nodes()) - if err != nil { - ch <- err - return - } + // discover the K8s cluster at controller level through watch + Cloud: func() *DiscoveredState { + var c = NewDiscoveredState(r.Auth, r.Logger) + c.ClusterNodes = r.getClusterNodes() + return c + }(), - if !unjoined { - r.info(ruObj, "termination waiter completed but node is still joined, will proceed with upgrade", "nodeName", nodeName) + DrainTimeout: r.DrainTimeout, + IgnoreDrainFailures: r.IgnoreDrainFailures, } - err = r.setStateTag(ruObj, targetInstanceID, "completed") - if err != nil { - r.info(ruObj, "Setting tag on the instance post termination failed.", "nodeName", nodeName) - } - ruObj.Status.NodesProcessed = ruObj.Status.NodesProcessed + 1 - common.SetNodesProcessedMetric(ruObj.Spec.AsgName, ruObj.Status.NodesProcessed) - if err := r.Status().Update(*ctx, ruObj); err != nil { - // Check if the err is "StorageError: invalid object". If so, the object was deleted... - if strings.Contains(err.Error(), "StorageError: invalid object") { - r.info(ruObj, "Object mostly deleted") - } else { - r.error(ruObj, err, "failed to update status") - } + // process node rotation + if err := rollupCtx.RotateNodes(); err != nil { + rollingUpgrade.SetCurrentStatus(v1alpha1.StatusError) + common.SetMetricRollupFailed(rollingUpgrade.Name) + return ctrl.Result{}, err } - ch <- nil -} - -func (r *RollingUpgradeReconciler) getNodeCreationTimestamp(ec2Instance *autoscaling.Instance) (bool, time.Time) { - for _, node := range r.NodeList.Items { - tokens := strings.Split(node.Spec.ProviderID, "/") - instanceID := tokens[len(tokens)-1] - if instanceID == aws.StringValue(ec2Instance.InstanceId) { - return true, node.ObjectMeta.CreationTimestamp.Time - } - } - return false, time.Time{} + return reconcile.Result{RequeueAfter: v1alpha1.DefaultRequeueTime}, nil } -func (r *RollingUpgradeReconciler) getTemplateLatestVersion(templateName string) string { - for _, t := range r.LaunchTemplates { - name := aws.StringValue(t.LaunchTemplateName) - if name == templateName { - versionInt := aws.Int64Value(t.LatestVersionNumber) - return strconv.FormatInt(versionInt, 10) - } - } - return "0" +// SetupWithManager sets up the controller with the Manager. +func (r *RollingUpgradeReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&v1alpha1.RollingUpgrade{}). + Watches(&source.Kind{Type: &corev1.Node{}}, nil). + WithEventFilter(r.NodeEventsHandler()). + WithOptions(controller.Options{MaxConcurrentReconciles: r.maxParallel}). + Complete(r) } -// is the instance not using expected launch template ? 
-func (r *RollingUpgradeReconciler) requiresRefresh(ruObj *upgrademgrv1alpha1.RollingUpgrade, ec2Instance *autoscaling.Instance, - definition *launchDefinition) bool { - - instanceID := aws.StringValue(ec2Instance.InstanceId) - if ruObj.Spec.ForceRefresh { - if ok, nodeCreationTS := r.getNodeCreationTimestamp(ec2Instance); ok { - if nodeCreationTS.Before(ruObj.CreationTimestamp.Time) { - r.info(ruObj, "rolling upgrade configured for forced refresh") - return true +// NodesEventHandler will fetch us the nodes on corresponding events, an alternative to doing explicit API calls. +func (r *RollingUpgradeReconciler) NodeEventsHandler() predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + nodeObj, ok := e.Object.(*corev1.Node) + if ok { + nodeName := e.Object.GetName() + log.Debug("nodeEventsHandler[create] nodeObj created, stored in sync map", "nodeName", nodeName) + r.ClusterNodesMap.Store(nodeName, nodeObj) + return false } - } - - r.info(ruObj, "node", instanceID, "created after rollingupgrade object. Ignoring forceRefresh") - return false - } - if definition.launchConfigurationName != nil { - if *(definition.launchConfigurationName) != aws.StringValue(ec2Instance.LaunchConfigurationName) { - r.info(ruObj, "node", instanceID, "launch configuration name differs") - return true - } - } else if definition.launchTemplate != nil { - instanceLaunchTemplate := ec2Instance.LaunchTemplate - targetLaunchTemplate := definition.launchTemplate - - if instanceLaunchTemplate == nil { - r.info(ruObj, "node", instanceID, "instance switching to launch template") - return true - } - - var ( - instanceTemplateId = aws.StringValue(instanceLaunchTemplate.LaunchTemplateId) - templateId = aws.StringValue(targetLaunchTemplate.LaunchTemplateId) - instanceTemplateName = aws.StringValue(instanceLaunchTemplate.LaunchTemplateName) - templateName = aws.StringValue(targetLaunchTemplate.LaunchTemplateName) - instanceVersion = aws.StringValue(instanceLaunchTemplate.Version) - templateVersion = r.getTemplateLatestVersion(templateName) - ) - - if instanceTemplateId != templateId { - r.info(ruObj, "node", instanceID, "launch template id differs", "instanceTemplateId", instanceTemplateId, "templateId", templateId) return true - } - if instanceTemplateName != templateName { - r.info(ruObj, "node", instanceID, "launch template name differs", "instanceTemplateName", instanceTemplateName, "templateName", templateName) + }, + UpdateFunc: func(e event.UpdateEvent) bool { + nodeObj, ok := e.ObjectNew.(*corev1.Node) + if ok { + nodeName := e.ObjectNew.GetName() + log.Debug("nodeEventsHandler[update] nodeObj updated, updated in sync map", "nodeName", nodeName) + r.ClusterNodesMap.Store(nodeName, nodeObj) + return false + } return true - } - - if instanceVersion != templateVersion { - r.info(ruObj, "node", instanceID, "launch template version differs", "instanceVersion", instanceVersion, "templateVersion", templateVersion) + }, + DeleteFunc: func(e event.DeleteEvent) bool { + _, ok := e.Object.(*corev1.Node) + if ok { + nodeName := e.Object.GetName() + r.ClusterNodesMap.Delete(nodeName) + log.Debug("nodeEventsHandler[delete] - nodeObj not found, deleted from sync map", "name", nodeName) + return false + } return true - } + }, } - - r.info(ruObj, "node", instanceID, "node refresh not required") - return false } -// logger creates logger for rolling upgrade. 
-func (r *RollingUpgradeReconciler) logger(ruObj *upgrademgrv1alpha1.RollingUpgrade) logr.Logger { - return r.Log.WithValues("rollingupgrade", ruObj.NamespacedName()) +// number of reconciles the upgrade-manager should handle in parallel +func (r *RollingUpgradeReconciler) SetMaxParallel(n int) { + if n >= 1 { + r.Info("setting max parallel reconcile", "value", n) + r.maxParallel = n + } } -// info logs message with Info level for the specified rolling upgrade. -func (r *RollingUpgradeReconciler) info(ruObj *upgrademgrv1alpha1.RollingUpgrade, msg string, keysAndValues ...interface{}) { - r.logger(ruObj).Info(msg, keysAndValues...) +// at the end of every reconcile, update the RollingUpgrade object +func (r *RollingUpgradeReconciler) UpdateStatus(rollingUpgrade *v1alpha1.RollingUpgrade) { + r.ReconcileMap.LoadAndDelete(rollingUpgrade.NamespacedName()) + if err := r.Status().Update(context.Background(), rollingUpgrade); err != nil { + r.Info("failed to update status", "message", err.Error(), "name", rollingUpgrade.NamespacedName()) + } } -// error logs message with Error level for the specified rolling upgrade. -func (r *RollingUpgradeReconciler) error(ruObj *upgrademgrv1alpha1.RollingUpgrade, err error, msg string, keysAndValues ...interface{}) { - r.logger(ruObj).Error(err, msg, keysAndValues...) +// extract node objects from syncMap to a slice +func (r *RollingUpgradeReconciler) getClusterNodes() []*corev1.Node { + var clusterNodes []*corev1.Node + + m := map[string]interface{}{} + r.ClusterNodesMap.Range(func(key, value interface{}) bool { + m[fmt.Sprint(key)] = value + return true + }) + for _, value := range m { + clusterNodes = append(clusterNodes, value.(*corev1.Node)) + } + return clusterNodes } diff --git a/controllers/rollingupgrade_controller_test.go b/controllers/rollingupgrade_controller_test.go deleted file mode 100644 index b0da1bf5..00000000 --- a/controllers/rollingupgrade_controller_test.go +++ /dev/null @@ -1,3065 +0,0 @@ -package controllers - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - "testing" - "time" - - "k8s.io/client-go/kubernetes/scheme" - log2 "sigs.k8s.io/controller-runtime/pkg/log" - - "github.com/keikoproj/aws-sdk-go-cache/cache" - "github.com/keikoproj/upgrade-manager/pkg/log" - - "k8s.io/apimachinery/pkg/util/intstr" - - "gopkg.in/yaml.v2" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/aws/aws-sdk-go/service/autoscaling/autoscalingiface" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/ec2/ec2iface" - "github.com/onsi/gomega" - "golang.org/x/net/context" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/fake" - v1 "k8s.io/client-go/kubernetes/typed/core/v1" - "sigs.k8s.io/controller-runtime/pkg/envtest" - "sigs.k8s.io/controller-runtime/pkg/manager" - - upgrademgrv1alpha1 "github.com/keikoproj/upgrade-manager/api/v1alpha1" -) - -var nodeSteps = make(map[string][]upgrademgrv1alpha1.NodeStepDuration) -var inProcessingNodes = make(map[string]*upgrademgrv1alpha1.NodeInProcessing) -var mutex = &sync.Mutex{} - -func TestMain(m *testing.M) { - testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, - } - - var err error - cfg, err = testEnv.Start() - if err != nil { - panic(err) - } - os.Exit(m.Run()) -} - -func TestErrorStatusMarkJanitor(t *testing.T) { - g := 
gomega.NewGomegaWithT(t) - someAsg := "someAsg" - instance := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{AsgName: someAsg}, - } - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - rcRollingUpgrade := &RollingUpgradeReconciler{Client: mgr.GetClient(), - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - Log: log2.NullLogger{}, - ClusterState: NewClusterState(), - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - } - - ctx := context.TODO() - err = fmt.Errorf("execution error") - rcRollingUpgrade.inProcessASGs.Store(someAsg, "processing") - rcRollingUpgrade.finishExecution(err, 3, &ctx, instance) - g.Expect(instance.ObjectMeta.Annotations[JanitorAnnotation]).To(gomega.Equal(ClearErrorFrequency)) - _, exists := rcRollingUpgrade.inProcessASGs.Load(someAsg) - g.Expect(exists).To(gomega.BeFalse()) -} - -func TestMarkObjForCleanupCompleted(t *testing.T) { - g := gomega.NewGomegaWithT(t) - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - ruObj.Status.CurrentStatus = upgrademgrv1alpha1.StatusComplete - - g.Expect(ruObj.ObjectMeta.Annotations).To(gomega.BeNil()) - MarkObjForCleanup(ruObj) - g.Expect(ruObj.ObjectMeta.Annotations[JanitorAnnotation]).To(gomega.Equal(ClearCompletedFrequency)) -} - -func TestMarkObjForCleanupError(t *testing.T) { - g := gomega.NewGomegaWithT(t) - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - ruObj.Status.CurrentStatus = upgrademgrv1alpha1.StatusError - - g.Expect(ruObj.ObjectMeta.Annotations).To(gomega.BeNil()) - MarkObjForCleanup(ruObj) - g.Expect(ruObj.ObjectMeta.Annotations[JanitorAnnotation]).To(gomega.Equal(ClearErrorFrequency)) -} - -func TestMarkObjForCleanupNothingHappens(t *testing.T) { - g := gomega.NewGomegaWithT(t) - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - ruObj.Status.CurrentStatus = "some other status" - - g.Expect(ruObj.ObjectMeta.Annotations).To(gomega.BeNil()) - MarkObjForCleanup(ruObj) - g.Expect(ruObj.ObjectMeta.Annotations).To(gomega.BeEmpty()) -} - -func TestPreDrainScriptSuccess(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - ruObj.Spec.PreDrain.Script = "echo 'Predrain script ran without error'" - - rcRollingUpgrade := createReconciler() - err := rcRollingUpgrade.preDrainHelper("test-instance-id", "test", ruObj) - g.Expect(err).To(gomega.BeNil()) -} - -func TestPreDrainScriptError(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - ruObj.Spec.PreDrain.Script = "exit 1" - - rcRollingUpgrade := createReconciler() - err := rcRollingUpgrade.preDrainHelper("test-instance-id", "test", ruObj) - g.Expect(err.Error()).To(gomega.ContainSubstring("Failed to run preDrain script")) -} - -func TestPostDrainHelperPostDrainScriptSuccess(t *testing.T) { - g := gomega.NewGomegaWithT(t) - mockNode := "some-node-name" - mockKubeCtlCall := "echo" - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - ruObj.Spec.PostDrain.Script = "echo Hello, postDrainScript!" 
- - rcRollingUpgrade := createReconciler() - rcRollingUpgrade.ScriptRunner.KubectlCall = mockKubeCtlCall - err := rcRollingUpgrade.postDrainHelper("test-instance-id", mockNode, ruObj, nodeSteps, inProcessingNodes, mutex) - - g.Expect(err).To(gomega.BeNil()) -} - -func TestPostDrainHelperPostDrainScriptError(t *testing.T) { - g := gomega.NewGomegaWithT(t) - mockNode := "some-node-name" - mockKubeCtlCall := "k() { echo $@ >> cmdlog.txt ; }; k" - os.Remove("cmdlog.txt") - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - ruObj.Spec.PostDrain.Script = "exit 1" - - rcRollingUpgrade := createReconciler() - rcRollingUpgrade.ScriptRunner.KubectlCall = mockKubeCtlCall - err := rcRollingUpgrade.postDrainHelper("test-instance-id", mockNode, ruObj, nodeSteps, inProcessingNodes, mutex) - - g.Expect(err).To(gomega.Not(gomega.BeNil())) - - // assert node was uncordoned - cmdlog, _ := ioutil.ReadFile("cmdlog.txt") - g.Expect(string(cmdlog)).To(gomega.Equal(fmt.Sprintf("uncordon %s\n", mockNode))) - os.Remove("cmdlog.txt") -} - -func TestPostDrainHelperPostDrainScriptErrorWithIgnoreDrainFailures(t *testing.T) { - g := gomega.NewGomegaWithT(t) - mockNode := "some-node-name" - mockKubeCtlCall := "k() { echo $@ >> cmdlog.txt ; }; k" - os.Remove("cmdlog.txt") - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{IgnoreDrainFailures: true}} - ruObj.Spec.PostDrain.Script = "exit 1" - - rcRollingUpgrade := createReconciler() - rcRollingUpgrade.ScriptRunner.KubectlCall = mockKubeCtlCall - err := rcRollingUpgrade.postDrainHelper("test-instance-id", mockNode, ruObj, nodeSteps, inProcessingNodes, mutex) - - g.Expect(err).To(gomega.Not(gomega.BeNil())) - - // assert node was not uncordoned - cmdlog, _ := ioutil.ReadFile("cmdlog.txt") - g.Expect(string(cmdlog)).To(gomega.Equal("")) - os.Remove("cmdlog.txt") -} - -func TestPostDrainHelperPostDrainWaitScriptSuccess(t *testing.T) { - g := gomega.NewGomegaWithT(t) - mockNode := "some-node-name" - mockKubeCtlCall := "echo" - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - ruObj.Spec.PostDrain.PostWaitScript = "echo Hello, postDrainWaitScript!" 
- ruObj.Spec.PostDrainDelaySeconds = 0 - - rcRollingUpgrade := createReconciler() - rcRollingUpgrade.ScriptRunner.KubectlCall = mockKubeCtlCall - err := rcRollingUpgrade.postDrainHelper("test-instance-id", mockNode, ruObj, nodeSteps, inProcessingNodes, mutex) - - g.Expect(err).To(gomega.BeNil()) -} - -func TestPostDrainHelperPostDrainWaitScriptError(t *testing.T) { - g := gomega.NewGomegaWithT(t) - mockNode := "some-node-name" - mockKubeCtlCall := "k() { echo $@ >> cmdlog.txt ; }; k" - os.Remove("cmdlog.txt") - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - ruObj.Spec.PostDrain.PostWaitScript = "exit 1" - ruObj.Spec.PostDrainDelaySeconds = 0 - - rcRollingUpgrade := createReconciler() - rcRollingUpgrade.ScriptRunner.KubectlCall = mockKubeCtlCall - err := rcRollingUpgrade.postDrainHelper("test-instance-id", mockNode, ruObj, nodeSteps, inProcessingNodes, mutex) - - g.Expect(err).To(gomega.Not(gomega.BeNil())) - - // assert node was uncordoned - cmdlog, _ := ioutil.ReadFile("cmdlog.txt") - g.Expect(string(cmdlog)).To(gomega.Equal(fmt.Sprintf("uncordon %s\n", mockNode))) - os.Remove("cmdlog.txt") -} - -func TestPostDrainHelperPostDrainWaitScriptErrorWithIgnoreDrainFailures(t *testing.T) { - g := gomega.NewGomegaWithT(t) - mockNode := "some-node-name" - mockKubeCtlCall := "k() { echo $@ >> cmdlog.txt ; }; k" - os.Remove("cmdlog.txt") - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{IgnoreDrainFailures: true}} - ruObj.Spec.PostDrain.PostWaitScript = "exit 1" - ruObj.Spec.PostDrainDelaySeconds = 0 - - rcRollingUpgrade := createReconciler() - rcRollingUpgrade.ScriptRunner.KubectlCall = mockKubeCtlCall - err := rcRollingUpgrade.postDrainHelper("test-instance-id", mockNode, ruObj, nodeSteps, inProcessingNodes, mutex) - - g.Expect(err).To(gomega.Not(gomega.BeNil())) - - // assert node was not uncordoned - cmdlog, _ := ioutil.ReadFile("cmdlog.txt") - g.Expect(string(cmdlog)).To(gomega.Equal("")) - os.Remove("cmdlog.txt") -} - -func TestDrainNodeSuccess(t *testing.T) { - g := gomega.NewGomegaWithT(t) - mockNode := "some-node-name" - mockKubeCtlCall := "echo" - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{IgnoreDrainFailures: true}, - } - rcRollingUpgrade := createReconciler() - rcRollingUpgrade.ScriptRunner.KubectlCall = mockKubeCtlCall - - err := rcRollingUpgrade.DrainNode(ruObj, mockNode, "test-id", ruObj.Spec.Strategy.DrainTimeout, nodeSteps, inProcessingNodes, mutex) - g.Expect(err).To(gomega.BeNil()) -} - -func TestDrainNodePreDrainError(t *testing.T) { - g := gomega.NewGomegaWithT(t) - mockNode := "some-node-name" - mockKubeCtlCall := "echo" - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - ruObj.Spec.PreDrain.Script = "exit 1" - rcRollingUpgrade := createReconciler() - rcRollingUpgrade.ScriptRunner.KubectlCall = mockKubeCtlCall - - err := rcRollingUpgrade.DrainNode(ruObj, mockNode, "test-id", ruObj.Spec.Strategy.DrainTimeout, nodeSteps, inProcessingNodes, mutex) - g.Expect(err).To(gomega.Not(gomega.BeNil())) -} - -func TestDrainNodePostDrainScriptError(t *testing.T) { - g := gomega.NewGomegaWithT(t) - mockNode := "some-node-name" - mockKubeCtlCall := "echo" - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: 
"foo", Namespace: "default"}} - ruObj.Spec.PostDrain.Script = "exit 1" - rcRollingUpgrade := createReconciler() - rcRollingUpgrade.ScriptRunner.KubectlCall = mockKubeCtlCall - - err := rcRollingUpgrade.DrainNode(ruObj, mockNode, "test-id", ruObj.Spec.Strategy.DrainTimeout, nodeSteps, inProcessingNodes, mutex) - g.Expect(err).To(gomega.Not(gomega.BeNil())) -} - -func TestDrainNodePostDrainWaitScriptError(t *testing.T) { - g := gomega.NewGomegaWithT(t) - mockNode := "some-node-name" - mockKubeCtlCall := "echo" - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - ruObj.Spec.PostDrain.PostWaitScript = "exit 1" - rcRollingUpgrade := createReconciler() - rcRollingUpgrade.ScriptRunner.KubectlCall = mockKubeCtlCall - - err := rcRollingUpgrade.DrainNode(ruObj, mockNode, "test-id", ruObj.Spec.Strategy.DrainTimeout, nodeSteps, inProcessingNodes, mutex) - g.Expect(err).To(gomega.Not(gomega.BeNil())) -} - -func TestDrainNodePostDrainFailureToDrainNotFound(t *testing.T) { - g := gomega.NewGomegaWithT(t) - mockNode := "some-node-name" - - // Force quit from the rest of the command - mockKubeCtlCall := "echo 'Error from server (NotFound)'; exit 1;" - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{IgnoreDrainFailures: true}, - } - rcRollingUpgrade := createReconciler() - rcRollingUpgrade.ScriptRunner.KubectlCall = mockKubeCtlCall - - err := rcRollingUpgrade.DrainNode(ruObj, mockNode, "test-id", ruObj.Spec.Strategy.DrainTimeout, nodeSteps, inProcessingNodes, mutex) - g.Expect(err).To(gomega.BeNil()) -} - -func TestDrainNodePostDrainFailureToDrain(t *testing.T) { - g := gomega.NewGomegaWithT(t) - mockNode := "some-node-name" - - // Force quit from the rest of the command - mockKubeCtlCall := "exit 1;" - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - Strategy: upgrademgrv1alpha1.UpdateStrategy{DrainTimeout: -1}, - }, - } - rcRollingUpgrade := createReconciler() - rcRollingUpgrade.ScriptRunner.KubectlCall = mockKubeCtlCall - - err := rcRollingUpgrade.DrainNode(ruObj, mockNode, "test-id", ruObj.Spec.Strategy.DrainTimeout, nodeSteps, inProcessingNodes, mutex) - g.Expect(err).To(gomega.Not(gomega.BeNil())) -} - -func createReconciler() *RollingUpgradeReconciler { - return &RollingUpgradeReconciler{ - ClusterState: NewClusterState(), - Log: log2.NullLogger{}, - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - } -} - -type MockEC2 struct { - ec2iface.EC2API - awsErr awserr.Error - reservations []*ec2.Reservation -} - -type MockAutoscalingGroup struct { - autoscalingiface.AutoScalingAPI - errorFlag bool - awsErr awserr.Error - errorInstanceId string - autoScalingGroups []*autoscaling.Group -} - -func (m MockEC2) CreateTags(_ *ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error) { - if m.awsErr != nil { - return nil, m.awsErr - } - return &ec2.CreateTagsOutput{}, nil -} - -func (m MockEC2) DescribeInstances(_ *ec2.DescribeInstancesInput) (*ec2.DescribeInstancesOutput, error) { - return &ec2.DescribeInstancesOutput{Reservations: m.reservations}, nil -} - -func (m MockEC2) DescribeInstancesPages(input *ec2.DescribeInstancesInput, callback func(*ec2.DescribeInstancesOutput, bool) bool) error { - page, err := m.DescribeInstances(input) - if err != nil { - return err - } - callback(page, false) - return nil -} - -func 
(mockAutoscalingGroup MockAutoscalingGroup) EnterStandby(_ *autoscaling.EnterStandbyInput) (*autoscaling.EnterStandbyOutput, error) { - output := &autoscaling.EnterStandbyOutput{} - return output, nil -} - -func (mockAutoscalingGroup MockAutoscalingGroup) DescribeAutoScalingGroups(input *autoscaling.DescribeAutoScalingGroupsInput) (*autoscaling.DescribeAutoScalingGroupsOutput, error) { - var err error - output := autoscaling.DescribeAutoScalingGroupsOutput{ - AutoScalingGroups: []*autoscaling.Group{}, - } - //To support parallel ASG tracking. - asgA, asgB := "asg-a", "asg-b" - - if mockAutoscalingGroup.errorFlag { - err = mockAutoscalingGroup.awsErr - } - switch *input.AutoScalingGroupNames[0] { - case asgA: - output.AutoScalingGroups = []*autoscaling.Group{ - {AutoScalingGroupName: &asgA}, - } - case asgB: - output.AutoScalingGroups = []*autoscaling.Group{ - {AutoScalingGroupName: &asgB}, - } - default: - output.AutoScalingGroups = mockAutoscalingGroup.autoScalingGroups - } - return &output, err -} - -func (mockAutoscalingGroup MockAutoscalingGroup) TerminateInstanceInAutoScalingGroup(input *autoscaling.TerminateInstanceInAutoScalingGroupInput) (*autoscaling.TerminateInstanceInAutoScalingGroupOutput, error) { - output := &autoscaling.TerminateInstanceInAutoScalingGroupOutput{} - if mockAutoscalingGroup.errorFlag { - if mockAutoscalingGroup.awsErr != nil { - if len(mockAutoscalingGroup.errorInstanceId) <= 0 || - mockAutoscalingGroup.errorInstanceId == *input.InstanceId { - return output, mockAutoscalingGroup.awsErr - } - } - } - asgChange := autoscaling.Activity{ActivityId: aws.String("xxx"), AutoScalingGroupName: aws.String("sss"), Cause: aws.String("xxx"), StartTime: aws.Time(time.Now()), StatusCode: aws.String("200"), StatusMessage: aws.String("success")} - output.Activity = &asgChange - return output, nil -} - -func TestGetInProgressInstances(t *testing.T) { - g := gomega.NewGomegaWithT(t) - mockInstances := []*autoscaling.Instance{ - { - InstanceId: aws.String("i-0123foo"), - }, - { - InstanceId: aws.String("i-0123bar"), - }, - } - expectedInstance := &autoscaling.Instance{ - InstanceId: aws.String("i-0123foo"), - } - mockReservations := []*ec2.Reservation{ - { - Instances: []*ec2.Instance{ - { - InstanceId: aws.String("i-0123foo"), - }, - }, - }, - } - reconciler := &RollingUpgradeReconciler{ - ClusterState: NewClusterState(), - EC2Client: MockEC2{reservations: mockReservations}, - ASGClient: MockAutoscalingGroup{ - errorFlag: false, - awsErr: nil, - }, - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - } - inProgressInstances, err := reconciler.getInProgressInstances(mockInstances) - g.Expect(err).To(gomega.BeNil()) - g.Expect(inProgressInstances).To(gomega.ContainElement(expectedInstance)) - g.Expect(inProgressInstances).To(gomega.HaveLen(1)) -} - -func TestTerminateNodeSuccess(t *testing.T) { - g := gomega.NewGomegaWithT(t) - mockNode := "some-node-id" - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - rcRollingUpgrade := &RollingUpgradeReconciler{ - ClusterState: NewClusterState(), - Log: log2.NullLogger{}, - EC2Client: MockEC2{}, - ASGClient: MockAutoscalingGroup{ - errorFlag: false, - awsErr: nil, - }, - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - } - - err := rcRollingUpgrade.TerminateNode(ruObj, mockNode, "", nodeSteps, inProcessingNodes, mutex) - g.Expect(err).To(gomega.BeNil()) -} - -func TestTerminateNodeErrorNotFound(t *testing.T) { - g := gomega.NewGomegaWithT(t) - mockNode := "some-node-id" - - 
ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - mockAutoscalingGroup := MockAutoscalingGroup{errorFlag: true, awsErr: awserr.New("InvalidInstanceID.NotFound", - "ValidationError: Instance Id not found - No managed instance found for instance ID i-0bba", - nil)} - rcRollingUpgrade := &RollingUpgradeReconciler{ - ClusterState: NewClusterState(), - Log: log2.NullLogger{}, - ASGClient: mockAutoscalingGroup, - EC2Client: MockEC2{}, - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - } - - err := rcRollingUpgrade.TerminateNode(ruObj, mockNode, "", nodeSteps, inProcessingNodes, mutex) - g.Expect(err).To(gomega.BeNil()) -} - -func init() { - WaiterMaxDelay = time.Second * 2 - WaiterMinDelay = time.Second * 1 - WaiterMaxAttempts = uint32(2) -} - -func TestTerminateNodeErrorScalingActivityInProgressWithRetry(t *testing.T) { - g := gomega.NewGomegaWithT(t) - mockNode := "some-node-id" - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - mockAutoscalingGroup := MockAutoscalingGroup{errorFlag: true, awsErr: awserr.New(autoscaling.ErrCodeScalingActivityInProgressFault, - "Scaling activities in progress", - nil)} - rcRollingUpgrade := &RollingUpgradeReconciler{ - ClusterState: NewClusterState(), - Log: log2.NullLogger{}, - ASGClient: mockAutoscalingGroup, - EC2Client: MockEC2{}, - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - } - go func() { - time.Sleep(WaiterMaxDelay) - rcRollingUpgrade.ASGClient = MockAutoscalingGroup{ - errorFlag: false, - awsErr: nil, - } - }() - err := rcRollingUpgrade.TerminateNode(ruObj, mockNode, "", nodeSteps, inProcessingNodes, mutex) - g.Expect(err).To(gomega.BeNil()) -} - -func TestTerminateNodeErrorScalingActivityInProgress(t *testing.T) { - g := gomega.NewGomegaWithT(t) - mockNode := "some-node-id" - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - mockAutoscalingGroup := MockAutoscalingGroup{errorFlag: true, awsErr: awserr.New(autoscaling.ErrCodeScalingActivityInProgressFault, - "Scaling activities in progress", - nil)} - rcRollingUpgrade := &RollingUpgradeReconciler{ - ClusterState: NewClusterState(), - Log: log2.NullLogger{}, - ASGClient: mockAutoscalingGroup, - EC2Client: MockEC2{}, - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - } - err := rcRollingUpgrade.TerminateNode(ruObj, mockNode, "", nodeSteps, inProcessingNodes, mutex) - g.Expect(err.Error()).To(gomega.ContainSubstring("no more retries left")) -} - -func TestTerminateNodeErrorResourceContention(t *testing.T) { - g := gomega.NewGomegaWithT(t) - mockNode := "some-node-id" - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - mockAutoscalingGroup := MockAutoscalingGroup{errorFlag: true, awsErr: awserr.New(autoscaling.ErrCodeResourceContentionFault, - "Have a pending update on resource", - nil)} - rcRollingUpgrade := &RollingUpgradeReconciler{ - ClusterState: NewClusterState(), - Log: log2.NullLogger{}, - ASGClient: mockAutoscalingGroup, - EC2Client: MockEC2{}, - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - } - - err := rcRollingUpgrade.TerminateNode(ruObj, mockNode, "", nodeSteps, inProcessingNodes, mutex) - g.Expect(err.Error()).To(gomega.ContainSubstring("no more retries left")) -} - -func TestTerminateNodeErrorOtherError(t *testing.T) { - g := gomega.NewGomegaWithT(t) - mockNode := "some-node-id" - - ruObj := 
&upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - mockAutoscalingGroup := MockAutoscalingGroup{errorFlag: true, awsErr: awserr.New("some-other-aws-error", - "some message", - fmt.Errorf("some error"))} - - rcRollingUpgrade := &RollingUpgradeReconciler{ - ClusterState: NewClusterState(), - Log: log2.NullLogger{}, - ASGClient: mockAutoscalingGroup, - EC2Client: MockEC2{}, - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - } - err := rcRollingUpgrade.TerminateNode(ruObj, mockNode, "", nodeSteps, inProcessingNodes, mutex) - g.Expect(err.Error()).To(gomega.ContainSubstring("some error")) -} - -func TestTerminateNodePostTerminateScriptSuccess(t *testing.T) { - g := gomega.NewGomegaWithT(t) - mockNode := "some-node-id" - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - ruObj.Spec.PostTerminate.Script = "echo hello!" - mockAutoscalingGroup := MockAutoscalingGroup{errorFlag: false, awsErr: nil} - rcRollingUpgrade := &RollingUpgradeReconciler{ - ClusterState: NewClusterState(), - Log: log2.NullLogger{}, - ASGClient: mockAutoscalingGroup, - EC2Client: MockEC2{}, - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - } - err := rcRollingUpgrade.TerminateNode(ruObj, mockNode, "", nodeSteps, inProcessingNodes, mutex) - g.Expect(err).To(gomega.BeNil()) -} - -func TestTerminateNodePostTerminateScriptErrorNotFoundFromServer(t *testing.T) { - g := gomega.NewGomegaWithT(t) - mockNode := "some-node-id" - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - ruObj.Spec.PostTerminate.Script = "echo 'Error from server (NotFound)'; exit 1" - mockAutoscalingGroup := MockAutoscalingGroup{errorFlag: false, awsErr: nil} - rcRollingUpgrade := &RollingUpgradeReconciler{ - ClusterState: NewClusterState(), - Log: log2.NullLogger{}, - ASGClient: mockAutoscalingGroup, - EC2Client: MockEC2{}, - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - } - err := rcRollingUpgrade.TerminateNode(ruObj, mockNode, "", nodeSteps, inProcessingNodes, mutex) - g.Expect(err).To(gomega.BeNil()) -} - -func TestTerminateNodePostTerminateScriptErrorOtherError(t *testing.T) { - g := gomega.NewGomegaWithT(t) - mockNode := "some-node-id" - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - ruObj.Spec.PostTerminate.Script = "exit 1" - mockAutoscalingGroup := MockAutoscalingGroup{errorFlag: false, awsErr: nil} - rcRollingUpgrade := &RollingUpgradeReconciler{ - ClusterState: NewClusterState(), - Log: log2.NullLogger{}, - ASGClient: mockAutoscalingGroup, - EC2Client: MockEC2{}, - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - } - err := rcRollingUpgrade.TerminateNode(ruObj, mockNode, "", nodeSteps, inProcessingNodes, mutex) - g.Expect(err).To(gomega.Not(gomega.BeNil())) - g.Expect(err.Error()).To(gomega.ContainSubstring("Failed to run postTerminate script: ")) -} - -func TestLoadEnvironmentVariables(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - r := &ScriptRunner{} - - mockID := "fake-id-foo" - mockName := "instance-name-foo" - - env := r.buildEnv(&upgrademgrv1alpha1.RollingUpgrade{ - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: "asg-foo", - }, - }, mockID, mockName) - g.Expect(env).To(gomega.HaveLen(3)) - -} - -func TestGetNodeNameFoundNode(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockInstanceID := "123456" - autoscalingInstance := autoscaling.Instance{InstanceId: 
&mockInstanceID} - - fooNode1 := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "fooNode1"}, - Spec: corev1.NodeSpec{ProviderID: "foo-bar/9213851"}} - fooNode2 := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "fooNode2"}, - Spec: corev1.NodeSpec{ProviderID: "foo-bar/1234501"}} - correctNode := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "correctNode"}, - Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockInstanceID}} - - nodeList := corev1.NodeList{Items: []corev1.Node{fooNode1, fooNode2, correctNode}} - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - rcRollingUpgrade := createReconciler() - name := rcRollingUpgrade.getNodeName(&autoscalingInstance, &nodeList, ruObj) - - g.Expect(name).To(gomega.Equal("correctNode")) -} - -func TestGetNodeNameMissingNode(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockInstanceID := "123456" - autoscalingInstance := autoscaling.Instance{InstanceId: &mockInstanceID} - - fooNode1 := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "fooNode1"}, - Spec: corev1.NodeSpec{ProviderID: "foo-bar/9213851"}} - fooNode2 := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "fooNode2"}, - Spec: corev1.NodeSpec{ProviderID: "foo-bar/1234501"}} - - nodeList := corev1.NodeList{Items: []corev1.Node{fooNode1, fooNode2}} - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - rcRollingUpgrade := createReconciler() - name := rcRollingUpgrade.getNodeName(&autoscalingInstance, &nodeList, ruObj) - - g.Expect(name).To(gomega.Equal("")) -} - -func TestGetNodeFromAsgFoundNode(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockInstanceID := "123456" - autoscalingInstance := autoscaling.Instance{InstanceId: &mockInstanceID} - - fooNode1 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "foo-bar/9213851"}} - fooNode2 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "foo-bar/1234501"}} - - correctNode := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockInstanceID}} - - nodeList := corev1.NodeList{Items: []corev1.Node{fooNode1, fooNode2, correctNode}} - rcRollingUpgrade := createReconciler() - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - node := rcRollingUpgrade.getNodeFromAsg(&autoscalingInstance, &nodeList, ruObj) - - g.Expect(node).To(gomega.Not(gomega.BeNil())) - g.Expect(node).To(gomega.Equal(&correctNode)) -} - -func TestGetNodeFromAsgMissingNode(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockInstanceID := "123456" - autoscalingInstance := autoscaling.Instance{InstanceId: &mockInstanceID} - - fooNode1 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "foo-bar/9213851"}} - fooNode2 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "foo-bar/1234501"}} - - nodeList := corev1.NodeList{Items: []corev1.Node{fooNode1, fooNode2}} - rcRollingUpgrade := createReconciler() - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - node := rcRollingUpgrade.getNodeFromAsg(&autoscalingInstance, &nodeList, ruObj) - - g.Expect(node).To(gomega.BeNil()) -} - -func TestPopulateAsgSuccess(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - correctAsg := "correct-asg" - mockAsg := &autoscaling.Group{ - AutoScalingGroupName: &correctAsg, - } - mockAsgClient := MockAutoscalingGroup{ - autoScalingGroups: []*autoscaling.Group{mockAsg}, - } - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: 
metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - TypeMeta: metav1.TypeMeta{Kind: "RollingUpgrade", APIVersion: "v1alpha1"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{AsgName: "correct-asg"}} - - rcRollingUpgrade := &RollingUpgradeReconciler{ - Log: log2.NullLogger{}, - ClusterState: NewClusterState(), - ASGClient: mockAsgClient, - EC2Client: MockEC2{}, - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - } - err := rcRollingUpgrade.populateAsg(ruObj) - - g.Expect(err).To(gomega.BeNil()) - - expectedAsg := autoscaling.Group{AutoScalingGroupName: &correctAsg} - - requestedAsg, ok := rcRollingUpgrade.ruObjNameToASG.Load(ruObj.NamespacedName()) - g.Expect(ok).To(gomega.BeTrue()) - g.Expect(requestedAsg.AutoScalingGroupName).To(gomega.Equal(expectedAsg.AutoScalingGroupName)) -} - -func TestPopulateAsgTooMany(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockAsg1 := &autoscaling.Group{ - AutoScalingGroupName: aws.String("too-many"), - } - mockAsg2 := &autoscaling.Group{ - AutoScalingGroupName: aws.String("too-many"), - } - mockAsgClient := MockAutoscalingGroup{ - autoScalingGroups: []*autoscaling.Group{mockAsg1, mockAsg2}, - } - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - TypeMeta: metav1.TypeMeta{Kind: "RollingUpgrade", APIVersion: "v1alpha1"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{AsgName: "too-many"}} - - rcRollingUpgrade := &RollingUpgradeReconciler{ - Log: log2.NullLogger{}, - ClusterState: NewClusterState(), - ASGClient: mockAsgClient, - EC2Client: MockEC2{}, - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - } - err := rcRollingUpgrade.populateAsg(ruObj) - - g.Expect(err).To(gomega.Not(gomega.BeNil())) - g.Expect(err.Error()).To(gomega.ContainSubstring("Too many ASGs")) -} - -func TestPopulateAsgNone(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - TypeMeta: metav1.TypeMeta{Kind: "RollingUpgrade", APIVersion: "v1alpha1"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{AsgName: "no-asg-at-all"}} - - rcRollingUpgrade := &RollingUpgradeReconciler{ - Log: log2.NullLogger{}, - ClusterState: NewClusterState(), - ASGClient: &MockAutoscalingGroup{}, - EC2Client: MockEC2{}, - } - err := rcRollingUpgrade.populateAsg(ruObj) - - g.Expect(err).To(gomega.Not(gomega.BeNil())) - g.Expect(err.Error()).To(gomega.ContainSubstring("no ASG found")) -} - -func TestParallelAsgTracking(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - asgAName := "asg-a" - asgBName := "asg-b" - - ruObjA := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo-a", Namespace: "default"}, - TypeMeta: metav1.TypeMeta{Kind: "RollingUpgrade", APIVersion: "v1alpha1"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{AsgName: asgAName}} - ruObjB := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo-b", Namespace: "default"}, - TypeMeta: metav1.TypeMeta{Kind: "RollingUpgrade", APIVersion: "v1alpha1"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{AsgName: asgBName}} - - expectedAsgA := autoscaling.Group{AutoScalingGroupName: &asgAName} - expectedAsgB := autoscaling.Group{AutoScalingGroupName: &asgBName} - - rcRollingUpgrade := &RollingUpgradeReconciler{ - Log: log2.NullLogger{}, - ClusterState: NewClusterState(), - ASGClient: &MockAutoscalingGroup{}, - EC2Client: MockEC2{}, - } - - err := rcRollingUpgrade.populateAsg(ruObjA) - g.Expect(err).To(gomega.BeNil()) - - err = 
rcRollingUpgrade.populateAsg(ruObjB) - g.Expect(err).To(gomega.BeNil()) - - //This test ensures that we can lookup each of 2 separate ASGs after populating both - requestedAsgA, ok := rcRollingUpgrade.ruObjNameToASG.Load(ruObjA.NamespacedName()) - g.Expect(ok).To(gomega.BeTrue()) - - requestedAsgB, ok := rcRollingUpgrade.ruObjNameToASG.Load(ruObjB.NamespacedName()) - g.Expect(ok).To(gomega.BeTrue()) - - g.Expect(requestedAsgA.AutoScalingGroupName).To(gomega.Equal(expectedAsgA.AutoScalingGroupName)) - g.Expect(requestedAsgB.AutoScalingGroupName).To(gomega.Equal(expectedAsgB.AutoScalingGroupName)) -} - -type MockNodeList struct { - v1.NodeInterface - - // used to return errors if needed - errorFlag bool -} - -func (nodeInterface *MockNodeList) List(options metav1.ListOptions) (*corev1.NodeList, error) { - list := &corev1.NodeList{} - - if nodeInterface.errorFlag { - return list, fmt.Errorf("error flag raised") - } - - node1 := corev1.Node{TypeMeta: metav1.TypeMeta{Kind: "Node", APIVersion: "v1beta1"}, - ObjectMeta: metav1.ObjectMeta{Name: "node1"}} - node2 := corev1.Node{TypeMeta: metav1.TypeMeta{Kind: "Node", APIVersion: "v1beta1"}, - ObjectMeta: metav1.ObjectMeta{Name: "node2"}} - node3 := corev1.Node{TypeMeta: metav1.TypeMeta{Kind: "Node", APIVersion: "v1beta1"}, - ObjectMeta: metav1.ObjectMeta{Name: "node3"}} - - list.Items = []corev1.Node{node1, node2, node3} - return list, nil -} - -func TestPopulateNodeListSuccess(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - TypeMeta: metav1.TypeMeta{Kind: "RollingUpgrade", APIVersion: "v1alpha1"}} - rcRollingUpgrade := createReconciler() - - mockNodeListInterface := &MockNodeList{errorFlag: false} - err := rcRollingUpgrade.populateNodeList(ruObj, mockNodeListInterface) - - g.Expect(err).To(gomega.BeNil()) - g.Expect(rcRollingUpgrade.NodeList.Items[0].Name).To(gomega.Equal("node1")) - g.Expect(rcRollingUpgrade.NodeList.Items[1].Name).To(gomega.Equal("node2")) - g.Expect(rcRollingUpgrade.NodeList.Items[2].Name).To(gomega.Equal("node3")) -} - -func TestPopulateNodeListError(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - TypeMeta: metav1.TypeMeta{Kind: "RollingUpgrade", APIVersion: "v1alpha1"}} - rcRollingUpgrade := createReconciler() - - mockNodeListInterface := &MockNodeList{errorFlag: true} - err := rcRollingUpgrade.populateNodeList(ruObj, mockNodeListInterface) - - g.Expect(err).To(gomega.Not(gomega.BeNil())) - g.Expect(err.Error()).To(gomega.ContainSubstring("Failed to get all nodes in the cluster:")) -} - -func TestFinishExecutionCompleted(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - TypeMeta: metav1.TypeMeta{Kind: "RollingUpgrade", APIVersion: "v1alpha1"}} - startTime := time.Now() - ruObj.Status.StartTime = startTime.Format(time.RFC3339) - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - rcRollingUpgrade := &RollingUpgradeReconciler{Client: mgr.GetClient(), - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - Log: log2.NullLogger{}, - ClusterState: NewClusterState(), - } - ctx := context.TODO() - mockNodesProcessed := 3 - - rcRollingUpgrade.finishExecution(nil, mockNodesProcessed, &ctx, ruObj) - - 
g.Expect(ruObj.Status.CurrentStatus).To(gomega.Equal(upgrademgrv1alpha1.StatusComplete)) - g.Expect(ruObj.Status.NodesProcessed).To(gomega.Equal(mockNodesProcessed)) - g.Expect(ruObj.Status.EndTime).To(gomega.Not(gomega.BeNil())) - g.Expect(ruObj.Status.TotalProcessingTime).To(gomega.Not(gomega.BeNil())) - g.Expect(ruObj.Status.Conditions).To(gomega.Equal( - []upgrademgrv1alpha1.RollingUpgradeCondition{ - { - Type: upgrademgrv1alpha1.UpgradeComplete, - Status: corev1.ConditionTrue, - }, - }, - )) -} - -func TestFinishExecutionError(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: log2.NullLogger{}, - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - ClusterState: NewClusterState(), - } - startTime := time.Now() - ruObj.Status.StartTime = startTime.Format(time.RFC3339) - ctx := context.TODO() - mockNodesProcessed := 3 - - err = fmt.Errorf("execution error") - rcRollingUpgrade.finishExecution(err, mockNodesProcessed, &ctx, ruObj) - - g.Expect(ruObj.Status.CurrentStatus).To(gomega.Equal(upgrademgrv1alpha1.StatusError)) - g.Expect(ruObj.Status.NodesProcessed).To(gomega.Equal(mockNodesProcessed)) - g.Expect(ruObj.Status.EndTime).To(gomega.Not(gomega.BeNil())) - g.Expect(ruObj.Status.TotalProcessingTime).To(gomega.Not(gomega.BeNil())) - g.Expect(ruObj.Status.Conditions).To(gomega.Equal( - []upgrademgrv1alpha1.RollingUpgradeCondition{ - { - Type: upgrademgrv1alpha1.UpgradeComplete, - Status: corev1.ConditionTrue, - }, - }, - )) -} - -func TestProcessCompleted(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - TypeMeta: metav1.TypeMeta{Kind: "RollingUpgrade", APIVersion: "v1alpha1"}} - startTime := time.Now() - ruObj.Status.StartTime = startTime.Format(time.RFC3339) - ruObj.Status.CurrentStatus = upgrademgrv1alpha1.StatusComplete - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - rcRollingUpgrade := &RollingUpgradeReconciler{Client: mgr.GetClient(), - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - Log: log2.NullLogger{}, - ClusterState: NewClusterState(), - } - ctx := context.TODO() - - rcRollingUpgrade.Process(&ctx, ruObj) - - g.Expect(ruObj.Status.CurrentStatus).To(gomega.Equal(upgrademgrv1alpha1.StatusComplete)) - g.Expect(ruObj.Status.Conditions).To(gomega.BeNil()) -} - -// RunRestack() goes through the entire process without errors -func TestRunRestackSuccessOneNode(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - someAsg := "some-asg" - mockID := "some-id" - someLaunchConfig := "some-launch-config" - diffLaunchConfig := "different-launch-config" - az := "az-1" - - mockInstance := autoscaling.Instance{InstanceId: &mockID, LaunchConfigurationName: &diffLaunchConfig, AvailabilityZone: &az} - mockAsg := autoscaling.Group{AutoScalingGroupName: &someAsg, - LaunchConfigurationName: &someLaunchConfig, - Instances: []*autoscaling.Instance{&mockInstance}} - - strategy := upgrademgrv1alpha1.UpdateStrategy{Mode: upgrademgrv1alpha1.UpdateStrategyModeLazy} - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{AsgName: someAsg, Strategy: strategy, 
IgnoreDrainFailures: true}, - } - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - fooNode1 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "foo-bar/9213851"}} - fooNode2 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "foo-bar/1234501"}} - // correctNode has the same mockID as the mockInstance and a node name to be processed - correctNode := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockID}, - ObjectMeta: metav1.ObjectMeta{Name: "correct-node"}} - - nodeList := corev1.NodeList{Items: []corev1.Node{fooNode1, fooNode2, correctNode}} - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: log2.NullLogger{}, - ASGClient: MockAutoscalingGroup{}, - EC2Client: MockEC2{}, - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - NodeList: &nodeList, - ClusterState: NewClusterState(), - CacheConfig: cache.NewConfig(0*time.Second, 0, 0), - } - rcRollingUpgrade.admissionMap.Store(ruObj.NamespacedName(), "processing") - rcRollingUpgrade.ruObjNameToASG.Store(ruObj.NamespacedName(), &mockAsg) - - ctx := context.TODO() - - nodesProcessed, err := rcRollingUpgrade.runRestack(&ctx, ruObj) - g.Expect(nodesProcessed).To(gomega.Equal(1)) - g.Expect(err).To(gomega.BeNil()) - _, exists := rcRollingUpgrade.inProcessASGs.Load(someAsg) - g.Expect(exists).To(gomega.BeTrue()) -} - -func TestRunRestackSuccessMultipleNodes(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - someAsg := "some-asg" - mockID := "some-id" - mockID2 := "some-id-2" - someLaunchConfig := "some-launch-config" - diffLaunchConfig := "different-launch-config" - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: &mockID, LaunchConfigurationName: &diffLaunchConfig, AvailabilityZone: &az} - mockInstance2 := autoscaling.Instance{InstanceId: &mockID2, LaunchConfigurationName: &diffLaunchConfig, AvailabilityZone: &az} - mockAsg := autoscaling.Group{AutoScalingGroupName: &someAsg, - LaunchConfigurationName: &someLaunchConfig, - Instances: []*autoscaling.Instance{&mockInstance, &mockInstance2}} - - strategy := upgrademgrv1alpha1.UpdateStrategy{Mode: upgrademgrv1alpha1.UpdateStrategyModeLazy} - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{AsgName: someAsg, Strategy: strategy, IgnoreDrainFailures: true}} - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - fooNode1 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "foo-bar/9213851"}} - fooNode2 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "foo-bar/1234501"}} - correctNode := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockID}, - ObjectMeta: metav1.ObjectMeta{Name: "correct-node"}} - correctNode2 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockID2}, - ObjectMeta: metav1.ObjectMeta{Name: "correct-node2"}} - - nodeList := corev1.NodeList{Items: []corev1.Node{fooNode1, fooNode2, correctNode, correctNode2}} - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: log2.NullLogger{}, - ASGClient: MockAutoscalingGroup{}, - EC2Client: MockEC2{}, - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - NodeList: &nodeList, - ClusterState: NewClusterState(), - CacheConfig: cache.NewConfig(0*time.Second, 0, 0), - } - rcRollingUpgrade.admissionMap.Store(ruObj.NamespacedName(), "processing") - 
rcRollingUpgrade.ruObjNameToASG.Store(ruObj.NamespacedName(), &mockAsg) - - ctx := context.TODO() - - nodesProcessed, err := rcRollingUpgrade.runRestack(&ctx, ruObj) - g.Expect(nodesProcessed).To(gomega.Equal(2)) - g.Expect(err).To(gomega.BeNil()) -} - -func TestRunRestackSameLaunchConfig(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - someAsg := "some-asg" - mockID := "some-id" - someLaunchConfig := "some-launch-config" - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: &mockID, LaunchConfigurationName: &someLaunchConfig, AvailabilityZone: &az} - mockAsg := autoscaling.Group{AutoScalingGroupName: &someAsg, - LaunchConfigurationName: &someLaunchConfig, - Instances: []*autoscaling.Instance{&mockInstance}} - - strategy := upgrademgrv1alpha1.UpdateStrategy{Mode: upgrademgrv1alpha1.UpdateStrategyModeLazy} - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{AsgName: someAsg, Strategy: strategy}} - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: log2.NullLogger{}, - ASGClient: MockAutoscalingGroup{}, - EC2Client: MockEC2{}, - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - ClusterState: NewClusterState(), - CacheConfig: cache.NewConfig(0*time.Second, 0, 0), - } - rcRollingUpgrade.admissionMap.Store(ruObj.NamespacedName(), "processing") - rcRollingUpgrade.ruObjNameToASG.Store(ruObj.NamespacedName(), &mockAsg) - - ctx := context.TODO() - - // This execution should not perform drain or termination, but should pass - nodesProcessed, err := rcRollingUpgrade.runRestack(&ctx, ruObj) - g.Expect(nodesProcessed).To(gomega.Equal(1)) - g.Expect(err).To(gomega.BeNil()) -} - -func TestRunRestackRollingUpgradeNodeNameNotFound(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - someAsg := "some-asg" - mockID := "some-id" - someLaunchConfig := "some-launch-config" - diffLaunchConfig := "different-launch-config" - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: &mockID, LaunchConfigurationName: &diffLaunchConfig, AvailabilityZone: &az} - mockAsg := autoscaling.Group{AutoScalingGroupName: &someAsg, - LaunchConfigurationName: &someLaunchConfig, - Instances: []*autoscaling.Instance{&mockInstance}} - - strategy := upgrademgrv1alpha1.UpdateStrategy{Mode: upgrademgrv1alpha1.UpdateStrategyModeLazy} - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{AsgName: someAsg, Strategy: strategy}} - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - emptyNodeList := corev1.NodeList{} - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: log2.NullLogger{}, - ASGClient: MockAutoscalingGroup{}, - EC2Client: MockEC2{}, - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - NodeList: &emptyNodeList, - ClusterState: NewClusterState(), - CacheConfig: cache.NewConfig(0*time.Second, 0, 0), - } - rcRollingUpgrade.admissionMap.Store(ruObj.NamespacedName(), "processing") - rcRollingUpgrade.ruObjNameToASG.Store(ruObj.NamespacedName(), &mockAsg) - - ctx := context.TODO() - - // This execution gets past the different launch config check, but fails to be found at the node level - nodesProcessed, err := rcRollingUpgrade.runRestack(&ctx, ruObj) - g.Expect(nodesProcessed).To(gomega.Equal(1)) - 
g.Expect(err).To(gomega.BeNil()) -} - -func TestRunRestackNoNodeName(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - someAsg := "some-asg" - mockID := "some-id" - someLaunchConfig := "some-launch-config" - diffLaunchConfig := "different-launch-config" - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: &mockID, LaunchConfigurationName: &diffLaunchConfig, AvailabilityZone: &az} - mockAsg := autoscaling.Group{AutoScalingGroupName: &someAsg, - LaunchConfigurationName: &someLaunchConfig, - Instances: []*autoscaling.Instance{&mockInstance}} - - strategy := upgrademgrv1alpha1.UpdateStrategy{Mode: upgrademgrv1alpha1.UpdateStrategyModeLazy} - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{AsgName: someAsg, Strategy: strategy}} - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - fooNode1 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "foo-bar/9213851"}} - fooNode2 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "foo-bar/1234501"}} - // correctNode has the same mockID as the mockInstance - correctNode := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockID}} - - nodeList := corev1.NodeList{Items: []corev1.Node{fooNode1, fooNode2, correctNode}} - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: log2.NullLogger{}, - ASGClient: MockAutoscalingGroup{}, - EC2Client: MockEC2{}, - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - NodeList: &nodeList, - ClusterState: NewClusterState(), - CacheConfig: cache.NewConfig(0*time.Second, 0, 0), - } - rcRollingUpgrade.admissionMap.Store(ruObj.NamespacedName(), "processing") - rcRollingUpgrade.ruObjNameToASG.Store(ruObj.NamespacedName(), &mockAsg) - - ctx := context.TODO() - - // This execution gets past the different launch config check, but since there is no node name, it is skipped - nodesProcessed, err := rcRollingUpgrade.runRestack(&ctx, ruObj) - g.Expect(nodesProcessed).To(gomega.Equal(1)) - g.Expect(err).To(gomega.BeNil()) -} - -func TestRunRestackDrainNodeFail(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - someAsg := "some-asg" - mockID := "some-id" - someLaunchConfig := "some-launch-config" - diffLaunchConfig := "different-launch-config" - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: &mockID, - LaunchConfigurationName: &diffLaunchConfig, - AvailabilityZone: &az, - } - mockAsg := autoscaling.Group{AutoScalingGroupName: &someAsg, - LaunchConfigurationName: &someLaunchConfig, - Instances: []*autoscaling.Instance{&mockInstance}, - } - - somePreDrain := upgrademgrv1alpha1.PreDrainSpec{ - Script: "exit 1", - } - - // Will fail upon running the preDrain() script - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: someAsg, - PreDrain: somePreDrain, - Strategy: upgrademgrv1alpha1.UpdateStrategy{ - Mode: "eager", - }, - }, - } - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - fooNode1 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "foo-bar/9213851"}} - fooNode2 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "foo-bar/1234501"}} - // correctNode has the same mockID as the mockInstance and a node name to be processed - correctNode := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockID}, - ObjectMeta: metav1.ObjectMeta{Name: "correct-node"}} - - nodeList 
:= corev1.NodeList{Items: []corev1.Node{fooNode1, fooNode2, correctNode}} - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: log2.NullLogger{}, - ASGClient: MockAutoscalingGroup{}, - EC2Client: MockEC2{}, - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - NodeList: &nodeList, - ClusterState: NewClusterState(), - CacheConfig: cache.NewConfig(0*time.Second, 0, 0), - } - rcRollingUpgrade.admissionMap.Store(ruObj.NamespacedName(), "processing") - rcRollingUpgrade.ruObjNameToASG.Store(ruObj.NamespacedName(), &mockAsg) - - ctx := context.TODO() - - // This execution gets past the different launch config check, but fails to drain the node because of a predrain failing script - nodesProcessed, err := rcRollingUpgrade.runRestack(&ctx, ruObj) - - g.Expect(nodesProcessed).To(gomega.Equal(1)) - g.Expect(err.Error()).To(gomega.HavePrefix("Error updating instances, ErrorCount: 1, Errors: [")) -} - -func TestRunRestackTerminateNodeFail(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - someAsg := "some-asg" - mockID := "some-id" - someLaunchConfig := "some-launch-config" - diffLaunchConfig := "different-launch-config" - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: &mockID, LaunchConfigurationName: &diffLaunchConfig, AvailabilityZone: &az} - mockAsg := autoscaling.Group{AutoScalingGroupName: &someAsg, - LaunchConfigurationName: &someLaunchConfig, - Instances: []*autoscaling.Instance{&mockInstance}} - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: someAsg, - Strategy: upgrademgrv1alpha1.UpdateStrategy{Mode: "lazy"}, - IgnoreDrainFailures: true, - }, - } - // Error flag set, should return error - mockAutoscalingGroup := MockAutoscalingGroup{errorFlag: true, awsErr: awserr.New("some-other-aws-error", - "some message", - fmt.Errorf("some error"))} - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - fooNode1 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "foo-bar/9213851"}} - fooNode2 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "foo-bar/1234501"}} - // correctNode has the same mockID as the mockInstance and a node name to be processed - correctNode := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockID}, - ObjectMeta: metav1.ObjectMeta{Name: "correct-node"}} - - nodeList := corev1.NodeList{Items: []corev1.Node{fooNode1, fooNode2, correctNode}} - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: log2.NullLogger{}, - ASGClient: mockAutoscalingGroup, - EC2Client: MockEC2{}, - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - NodeList: &nodeList, - ClusterState: NewClusterState(), - CacheConfig: cache.NewConfig(0*time.Second, 0, 0), - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - } - rcRollingUpgrade.admissionMap.Store(ruObj.NamespacedName(), "processing") - rcRollingUpgrade.ruObjNameToASG.Store(ruObj.NamespacedName(), &mockAsg) - - ctx := context.TODO() - - // This execution gets past the different launch config check, but fails to terminate node - nodesProcessed, err := rcRollingUpgrade.runRestack(&ctx, ruObj) - g.Expect(nodesProcessed).To(gomega.Equal(1)) - g.Expect(err.Error()).To(gomega.HavePrefix("Error updating instances, ErrorCount: 1, Errors: [")) - g.Expect(err.Error()).To(gomega.ContainSubstring("some error")) -} - -func 
constructAutoScalingInstance(instanceId string, launchConfigName string, azName string) *autoscaling.Instance { - return &autoscaling.Instance{InstanceId: &instanceId, LaunchConfigurationName: &launchConfigName, AvailabilityZone: &azName} -} - -func TestUniformAcrossAzUpdateSuccessMultipleNodes(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - someAsg := "some-asg" - mockID := "some-id" - someLaunchConfig := "some-launch-config" - diffLaunchConfig := "different-launch-config" - az := "az-1" - az2 := "az-2" - az3 := "az-3" - mockAsg := autoscaling.Group{AutoScalingGroupName: &someAsg, - LaunchConfigurationName: &someLaunchConfig, - Instances: []*autoscaling.Instance{ - constructAutoScalingInstance(mockID+"1"+az, diffLaunchConfig, az), - constructAutoScalingInstance(mockID+"2"+az, diffLaunchConfig, az), - constructAutoScalingInstance(mockID+"1"+az2, diffLaunchConfig, az2), - constructAutoScalingInstance(mockID+"2"+az2, diffLaunchConfig, az2), - constructAutoScalingInstance(mockID+"3"+az2, diffLaunchConfig, az2), - constructAutoScalingInstance(mockID+"1"+az3, diffLaunchConfig, az3), - constructAutoScalingInstance(mockID+"2"+az3, diffLaunchConfig, az3), - constructAutoScalingInstance(mockID+"3"+az3, diffLaunchConfig, az3), - constructAutoScalingInstance(mockID+"4"+az3, diffLaunchConfig, az3), - }, - } - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: someAsg, - Strategy: upgrademgrv1alpha1.UpdateStrategy{ - Mode: "lazy", - Type: upgrademgrv1alpha1.UniformAcrossAzUpdateStrategy, - }, - IgnoreDrainFailures: true, - }, - } - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - fooNode1 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "foo-bar/9213851"}} - fooNode2 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "foo-bar/1234501"}} - correctNode1az1 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockID + "1" + az}, - ObjectMeta: metav1.ObjectMeta{Name: "correct-node"}} - correctNode2az1 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockID + "2" + az}, - ObjectMeta: metav1.ObjectMeta{Name: "correct-node"}} - correctNode1az2 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockID + "1" + az2}, - ObjectMeta: metav1.ObjectMeta{Name: "correct-node"}} - correctNode2az2 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockID + "2" + az2}, - ObjectMeta: metav1.ObjectMeta{Name: "correct-node"}} - correctNode3az2 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockID + "3" + az2}, - ObjectMeta: metav1.ObjectMeta{Name: "correct-node"}} - correctNode1az3 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockID + "1" + az3}, - ObjectMeta: metav1.ObjectMeta{Name: "correct-node"}} - correctNode2az3 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockID + "2" + az3}, - ObjectMeta: metav1.ObjectMeta{Name: "correct-node"}} - correctNode3az3 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockID + "3" + az3}, - ObjectMeta: metav1.ObjectMeta{Name: "correct-node"}} - correctNode4az3 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockID + "4" + az3}, - ObjectMeta: metav1.ObjectMeta{Name: "correct-node"}} - - nodeList := corev1.NodeList{Items: []corev1.Node{ - fooNode1, fooNode2, - correctNode1az1, correctNode2az1, - correctNode1az2, correctNode2az2, correctNode3az2, - 
correctNode1az3, correctNode2az3, correctNode3az3, correctNode4az3, - }} - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: log2.NullLogger{}, - ASGClient: MockAutoscalingGroup{}, - EC2Client: MockEC2{}, - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - NodeList: &nodeList, - ClusterState: NewClusterState(), - CacheConfig: cache.NewConfig(0*time.Second, 0, 0), - } - rcRollingUpgrade.admissionMap.Store(ruObj.NamespacedName(), "processing") - rcRollingUpgrade.ruObjNameToASG.Store(ruObj.NamespacedName(), &mockAsg) - - ctx := context.TODO() - - nodesProcessed, err := rcRollingUpgrade.runRestack(&ctx, ruObj) - g.Expect(nodesProcessed).To(gomega.Equal(9)) - g.Expect(err).To(gomega.BeNil()) -} - -func TestUpdateInstances(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - someAsg := "some-asg" - mockID := "some-id" - mockID2 := "some-id-2" - someLaunchConfig := "some-launch-config" - diffLaunchConfig := "different-launch-config" - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: &mockID, LaunchConfigurationName: &diffLaunchConfig, AvailabilityZone: &az} - mockInstance2 := autoscaling.Instance{InstanceId: &mockID2, LaunchConfigurationName: &diffLaunchConfig, AvailabilityZone: &az} - mockAsg := autoscaling.Group{AutoScalingGroupName: &someAsg, - LaunchConfigurationName: &someLaunchConfig, - Instances: []*autoscaling.Instance{&mockInstance, &mockInstance2}} - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: someAsg, - Strategy: upgrademgrv1alpha1.UpdateStrategy{Mode: "lazy"}, - IgnoreDrainFailures: true, - }, - } - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - fooNode1 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "foo-bar/9213851"}} - fooNode2 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "foo-bar/1234501"}} - correctNode := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockID}, - ObjectMeta: metav1.ObjectMeta{Name: "correct-node"}} - correctNode2 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockID2}, - ObjectMeta: metav1.ObjectMeta{Name: "correct-node2"}} - - nodeList := corev1.NodeList{Items: []corev1.Node{fooNode1, fooNode2, correctNode, correctNode2}} - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: log2.NullLogger{}, - ASGClient: MockAutoscalingGroup{}, - EC2Client: MockEC2{}, - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - NodeList: &nodeList, - ClusterState: NewClusterState(), - CacheConfig: cache.NewConfig(0*time.Second, 0, 0), - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - } - rcRollingUpgrade.admissionMap.Store(ruObj.NamespacedName(), "processing") - rcRollingUpgrade.ruObjNameToASG.Store(ruObj.NamespacedName(), &mockAsg) - - ctx := context.TODO() - - lcName := "A" - rcRollingUpgrade.ScriptRunner.KubectlCall = "exit 0;" - - err = rcRollingUpgrade.UpdateInstances(&ctx, - ruObj, mockAsg.Instances, &launchDefinition{launchConfigurationName: &lcName}) - g.Expect(err).ShouldNot(gomega.HaveOccurred()) - g.Expect(ruObj.Status.Statistics).ShouldNot(gomega.BeEmpty()) - g.Expect(ruObj.Status.LastBatchNodes).ShouldNot(gomega.BeEmpty()) - g.Expect(ruObj.Status.LastNodeDrainTime).ShouldNot(gomega.BeNil()) - g.Expect(ruObj.Status.LastNodeTerminationTime).ShouldNot(gomega.BeNil()) -} - -func TestUpdateInstancesError(t 
*testing.T) { - g := gomega.NewGomegaWithT(t) - - someAsg := "some-asg" - mockID := "some-id" - mockID2 := "some-id-2" - someLaunchConfig := "some-launch-config" - diffLaunchConfig := "different-launch-config" - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: &mockID, LaunchConfigurationName: &diffLaunchConfig, AvailabilityZone: &az} - mockInstance2 := autoscaling.Instance{InstanceId: &mockID2, LaunchConfigurationName: &diffLaunchConfig, AvailabilityZone: &az} - mockAsg := autoscaling.Group{AutoScalingGroupName: &someAsg, - LaunchConfigurationName: &someLaunchConfig, - Instances: []*autoscaling.Instance{&mockInstance, &mockInstance2}} - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: someAsg, - Strategy: upgrademgrv1alpha1.UpdateStrategy{ - Mode: "lazy", - }, - }, - } - - mockAutoScalingGroup := MockAutoscalingGroup{ - errorFlag: true, - awsErr: awserr.New("UnKnownError", - "some message", - nil)} - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - fooNode1 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "foo-bar/9213851"}} - fooNode2 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "foo-bar/1234501"}} - correctNode := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockID}, - ObjectMeta: metav1.ObjectMeta{Name: "correct-node"}} - correctNode2 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockID2}, - ObjectMeta: metav1.ObjectMeta{Name: "correct-node2"}} - - nodeList := corev1.NodeList{Items: []corev1.Node{fooNode1, fooNode2, correctNode, correctNode2}} - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: log2.NullLogger{}, - ASGClient: mockAutoScalingGroup, - EC2Client: MockEC2{}, - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - NodeList: &nodeList, - ClusterState: NewClusterState(), - CacheConfig: cache.NewConfig(0*time.Second, 0, 0), - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - } - rcRollingUpgrade.admissionMap.Store(ruObj.NamespacedName(), "processing") - rcRollingUpgrade.ruObjNameToASG.Store(ruObj.NamespacedName(), &mockAsg) - rcRollingUpgrade.ScriptRunner.KubectlCall = "exit 0;" - - ctx := context.TODO() - - lcName := "A" - err = rcRollingUpgrade.UpdateInstances(&ctx, - ruObj, mockAsg.Instances, &launchDefinition{launchConfigurationName: &lcName}) - g.Expect(err).Should(gomega.HaveOccurred()) - g.Expect(err).Should(gomega.BeAssignableToTypeOf(&UpdateInstancesError{})) - if updateInstancesError, ok := err.(*UpdateInstancesError); ok { - g.Expect(len(updateInstancesError.InstanceUpdateErrors)).Should(gomega.Equal(2)) - g.Expect(updateInstancesError.Error()).Should(gomega.ContainSubstring("Error updating instances, ErrorCount: 2")) - } -} - -func TestUpdateInstancesHandlesDeletedInstances(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - someAsg := "some-asg" - mockID := "some-id" - someLaunchConfig := "some-launch-config" - diffLaunchConfig := "different-launch-config" - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: &mockID, LaunchConfigurationName: &diffLaunchConfig, AvailabilityZone: &az} - mockAsg := autoscaling.Group{AutoScalingGroupName: &someAsg, - LaunchConfigurationName: &someLaunchConfig, - Instances: []*autoscaling.Instance{&mockInstance}} - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: 
upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: someAsg, - Strategy: upgrademgrv1alpha1.UpdateStrategy{ - Mode: "lazy", - }, - }, - } - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - fooNode1 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "foo-bar/9213851"}} - correctNode := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockID}, - ObjectMeta: metav1.ObjectMeta{Name: "correct-node"}} - - nodeList := corev1.NodeList{Items: []corev1.Node{fooNode1, correctNode}} - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: log2.NullLogger{}, - ASGClient: MockAutoscalingGroup{}, - EC2Client: MockEC2{ - awsErr: awserr.New("InvalidInstanceID.NotFound", "Instance not found", nil), - }, - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - NodeList: &nodeList, - ClusterState: NewClusterState(), - CacheConfig: cache.NewConfig(0*time.Second, 0, 0), - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - } - rcRollingUpgrade.admissionMap.Store(ruObj.NamespacedName(), "processing") - rcRollingUpgrade.ruObjNameToASG.Store(ruObj.NamespacedName(), &mockAsg) - rcRollingUpgrade.ScriptRunner.KubectlCall = "exit 0;" - - ctx := context.TODO() - - lcName := "A" - err = rcRollingUpgrade.UpdateInstances(&ctx, - ruObj, mockAsg.Instances, &launchDefinition{launchConfigurationName: &lcName}) - g.Expect(err).Should(gomega.BeNil()) -} - -func TestUpdateInstancesPartialError(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - someAsg := "some-asg" - mockID := "some-id" - mockID2 := "some-id-2" - someLaunchConfig := "some-launch-config" - diffLaunchConfig := "different-launch-config" - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: &mockID, LaunchConfigurationName: &diffLaunchConfig, AvailabilityZone: &az} - mockInstance2 := autoscaling.Instance{InstanceId: &mockID2, LaunchConfigurationName: &diffLaunchConfig, AvailabilityZone: &az} - mockAsg := autoscaling.Group{AutoScalingGroupName: &someAsg, - LaunchConfigurationName: &someLaunchConfig, - Instances: []*autoscaling.Instance{&mockInstance, &mockInstance2}} - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: someAsg, - Strategy: upgrademgrv1alpha1.UpdateStrategy{Mode: "lazy"}, - IgnoreDrainFailures: true, - }, - } - - mockAutoScalingGroup := MockAutoscalingGroup{ - errorFlag: true, - awsErr: awserr.New("UnKnownError", - "some message", - nil), - errorInstanceId: mockID2, - } - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - fooNode1 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "foo-bar/9213851"}} - fooNode2 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "foo-bar/1234501"}} - correctNode := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockID}, - ObjectMeta: metav1.ObjectMeta{Name: "correct-node"}} - correctNode2 := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockID2}, - ObjectMeta: metav1.ObjectMeta{Name: "correct-node2"}} - - nodeList := corev1.NodeList{Items: []corev1.Node{fooNode1, fooNode2, correctNode, correctNode2}} - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: log2.NullLogger{}, - ASGClient: mockAutoScalingGroup, - EC2Client: MockEC2{}, - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - NodeList: &nodeList, - ClusterState: NewClusterState(), - CacheConfig: cache.NewConfig(0*time.Second, 0, 0), - ScriptRunner: 
NewScriptRunner(log2.NullLogger{}), - } - rcRollingUpgrade.admissionMap.Store(ruObj.NamespacedName(), "processing") - rcRollingUpgrade.ruObjNameToASG.Store(ruObj.NamespacedName(), &mockAsg) - rcRollingUpgrade.ScriptRunner.KubectlCall = "exit 0;" - ctx := context.TODO() - - lcName := "A" - err = rcRollingUpgrade.UpdateInstances(&ctx, - ruObj, mockAsg.Instances, &launchDefinition{launchConfigurationName: &lcName}) - g.Expect(err).Should(gomega.HaveOccurred()) - g.Expect(err).Should(gomega.BeAssignableToTypeOf(&UpdateInstancesError{})) - if updateInstancesError, ok := err.(*UpdateInstancesError); ok { - g.Expect(len(updateInstancesError.InstanceUpdateErrors)).Should(gomega.Equal(1)) - g.Expect(updateInstancesError.Error()).Should(gomega.Equal("Error updating instances, ErrorCount: 1, Errors: [UnKnownError: some message]")) - } -} - -func TestUpdateInstancesWithZeroInstances(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: log2.NullLogger{}, - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - ClusterState: NewClusterState(), - CacheConfig: cache.NewConfig(0*time.Second, 0, 0), - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - } - rcRollingUpgrade.ScriptRunner.KubectlCall = "exit 0;" - - ctx := context.TODO() - - lcName := "A" - err = rcRollingUpgrade.UpdateInstances(&ctx, - nil, nil, &launchDefinition{launchConfigurationName: &lcName}) - g.Expect(err).ShouldNot(gomega.HaveOccurred()) -} - -func TestTestCallKubectlDrainWithoutDrainTimeout(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockKubeCtlCall := "sleep 1; echo" - mockNodeName := "some-node-name" - mockAsgName := "some-asg" - rcRollingUpgrade := createReconciler() - rcRollingUpgrade.ScriptRunner.KubectlCall = mockKubeCtlCall - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{AsgName: mockAsgName}} - - errChan := make(chan error) - ctx := context.TODO() - - go rcRollingUpgrade.CallKubectlDrain(mockNodeName, ruObj, errChan) - - output := "" - select { - case <-ctx.Done(): - log.Printf("Kubectl drain timed out for node - %s", mockNodeName) - log.Print(ctx.Err()) - output = "timed-out" - break - case err := <-errChan: - if err != nil { - log.Printf("Kubectl drain errored for node - %s, error: %s", mockNodeName, err.Error()) - output = "error" - break - } - log.Printf("Kubectl drain completed for node - %s", mockNodeName) - output = "completed" - break - } - - g.Expect(output).To(gomega.ContainSubstring("completed")) -} - -func TestTestCallKubectlDrainWithDrainTimeout(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockKubeCtlCall := "sleep 1; echo" - mockNodeName := "some-node-name" - rcRollingUpgrade := createReconciler() - rcRollingUpgrade.ScriptRunner.KubectlCall = mockKubeCtlCall - - mockAsgName := "some-asg" - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{AsgName: mockAsgName}} - - errChan := make(chan error) - ctx := context.TODO() - ctx, cancel := context.WithTimeout(ctx, 2*time.Second) - defer cancel() - - go rcRollingUpgrade.CallKubectlDrain(mockNodeName, ruObj, errChan) - - output := "" - select { - case <-ctx.Done(): - log.Printf("Kubectl drain timed out for node - %s", mockNodeName) - log.Print(ctx.Err()) - output = "timed-out" - 
break - case err := <-errChan: - if err != nil { - log.Printf("Kubectl drain errored for node - %s, error: %s", mockNodeName, err.Error()) - output = "error" - break - } - log.Printf("Kubectl drain completed for node - %s", mockNodeName) - output = "completed" - break - } - - g.Expect(output).To(gomega.ContainSubstring("completed")) -} - -func TestTestCallKubectlDrainWithZeroDrainTimeout(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockKubeCtlCall := "sleep 1; echo" - mockNodeName := "some-node-name" - rcRollingUpgrade := createReconciler() - rcRollingUpgrade.ScriptRunner.KubectlCall = mockKubeCtlCall - - mockAsgName := "some-asg" - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{AsgName: mockAsgName}} - - errChan := make(chan error) - ctx := context.TODO() - ctx, cancel := context.WithTimeout(ctx, 2*time.Second) - defer cancel() - - go rcRollingUpgrade.CallKubectlDrain(mockNodeName, ruObj, errChan) - - output := "" - select { - case <-ctx.Done(): - log.Printf("Kubectl drain timed out for node - %s", mockNodeName) - log.Print(ctx.Err()) - output = "timed-out" - break - case err := <-errChan: - if err != nil { - log.Printf("Kubectl drain errored for node - %s, error: %s", mockNodeName, err.Error()) - output = "error" - break - } - log.Printf("Kubectl drain completed for node - %s", mockNodeName) - output = "completed" - break - } - - g.Expect(output).To(gomega.ContainSubstring("completed")) -} - -func TestTestCallKubectlDrainWithError(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockKubeCtlCall := "cat xyz" - mockNodeName := "some-node-name" - rcRollingUpgrade := createReconciler() - rcRollingUpgrade.ScriptRunner.KubectlCall = mockKubeCtlCall - - mockAsgName := "some-asg" - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{AsgName: mockAsgName}} - - errChan := make(chan error) - ctx := context.TODO() - - go rcRollingUpgrade.CallKubectlDrain(mockNodeName, ruObj, errChan) - - output := "" - select { - case <-ctx.Done(): - log.Printf("Kubectl drain timed out for node - %s", mockNodeName) - log.Print(ctx.Err()) - output = "timed-out" - break - case err := <-errChan: - if err != nil { - log.Printf("Kubectl drain errored for node - %s, error: %s", mockNodeName, err.Error()) - output = "error" - break - } - log.Printf("Kubectl drain completed for node - %s", mockNodeName) - output = "completed" - break - } - - g.Expect(output).To(gomega.ContainSubstring("error")) -} - -func TestTestCallKubectlDrainWithTimeoutOccurring(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockKubeCtlCall := "sleep 1; echo" - mockNodeName := "some-node-name" - rcRollingUpgrade := createReconciler() - rcRollingUpgrade.ScriptRunner.KubectlCall = mockKubeCtlCall - - mockAsgName := "some-asg" - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{AsgName: mockAsgName}} - - errChan := make(chan error) - ctx := context.TODO() - ctx, cancel := context.WithTimeout(ctx, 50*time.Millisecond) - defer cancel() - - go rcRollingUpgrade.CallKubectlDrain(mockNodeName, ruObj, errChan) - - output := "" - select { - case <-ctx.Done(): - log.Printf("Kubectl drain timed out for node - %s", mockNodeName) - log.Print(ctx.Err()) - output = "timed-out" - break - case err := <-errChan: - if err != nil { - 
log.Printf("Kubectl drain errored for node - %s, error: %s", mockNodeName, err.Error()) - output = "error" - break - } - log.Printf("Kubectl drain completed for node - %s", mockNodeName) - output = "completed" - break - } - - g.Expect(output).To(gomega.ContainSubstring("timed-out")) -} - -func TestTestCallKubectlDrainIgnoresNoiseInOutput(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockKubeCtlCall := "echo 'I0105 Throttling request took 1.097511969s\\nError from server (NotFound): nodes \\\"some-node\\\" not found'; exit 1" - mockNodeName := "some-node" - mockAsgName := "some-asg" - rcRollingUpgrade := createReconciler() - rcRollingUpgrade.ScriptRunner.KubectlCall = mockKubeCtlCall - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{AsgName: mockAsgName}} - - errChan := make(chan error) - ctx := context.TODO() - - go rcRollingUpgrade.CallKubectlDrain(mockNodeName, ruObj, errChan) - - output := "" - select { - case <-ctx.Done(): - log.Printf("Kubectl drain timed out for node - %s", mockNodeName) - log.Print(ctx.Err()) - output = "timed-out" - break - case err := <-errChan: - if err != nil { - log.Printf("Kubectl drain errored for node - %s, error: %s", mockNodeName, err.Error()) - output = "error" - break - } - log.Printf("Kubectl drain completed for node - %s", mockNodeName) - output = "completed" - break - } - - g.Expect(output).To(gomega.ContainSubstring("completed")) -} - -func TestValidateRuObj(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockAsgName := "some-asg" - strategy := upgrademgrv1alpha1.UpdateStrategy{ - Type: upgrademgrv1alpha1.RandomUpdateStrategy, - MaxUnavailable: intstr.Parse("75"), - } - - rcRollingUpgrade := createReconciler() - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: mockAsgName, - Strategy: strategy, - }, - } - - err := rcRollingUpgrade.validateRollingUpgradeObj(ruObj) - g.Expect(err).To(gomega.BeNil()) -} - -func TestValidateruObjInvalidMaxUnavailable(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockAsgName := "some-asg" - strategy := upgrademgrv1alpha1.UpdateStrategy{ - Type: upgrademgrv1alpha1.RandomUpdateStrategy, - MaxUnavailable: intstr.Parse("150%"), - } - - rcRollingUpgrade := createReconciler() - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: mockAsgName, - Strategy: strategy, - }, - } - - err := rcRollingUpgrade.validateRollingUpgradeObj(ruObj) - g.Expect(err.Error()).To(gomega.ContainSubstring("Invalid value for maxUnavailable")) -} - -func TestValidateruObjMaxUnavailableZeroPercent(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockAsgName := "some-asg" - strategy := upgrademgrv1alpha1.UpdateStrategy{ - Type: upgrademgrv1alpha1.RandomUpdateStrategy, - MaxUnavailable: intstr.Parse("0%"), - } - - rcRollingUpgrade := createReconciler() - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: mockAsgName, - Strategy: strategy, - }, - } - - err := rcRollingUpgrade.validateRollingUpgradeObj(ruObj) - g.Expect(err.Error()).To(gomega.ContainSubstring("Invalid value for maxUnavailable")) -} - -func TestValidateruObjMaxUnavailableInt(t *testing.T) { - g := 
gomega.NewGomegaWithT(t) - - mockAsgName := "some-asg" - strategy := upgrademgrv1alpha1.UpdateStrategy{ - Type: upgrademgrv1alpha1.RandomUpdateStrategy, - MaxUnavailable: intstr.Parse("10"), - } - - rcRollingUpgrade := createReconciler() - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: mockAsgName, - Strategy: strategy, - }, - } - - err := rcRollingUpgrade.validateRollingUpgradeObj(ruObj) - g.Expect(err).To(gomega.BeNil()) -} - -func TestValidateruObjMaxUnavailableIntZero(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockAsgName := "some-asg" - strategy := upgrademgrv1alpha1.UpdateStrategy{ - Type: upgrademgrv1alpha1.RandomUpdateStrategy, - MaxUnavailable: intstr.Parse("0"), - } - - rcRollingUpgrade := createReconciler() - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: mockAsgName, - Strategy: strategy, - }, - } - - err := rcRollingUpgrade.validateRollingUpgradeObj(ruObj) - g.Expect(err.Error()).To(gomega.ContainSubstring("Invalid value for maxUnavailable")) -} - -func TestValidateruObjMaxUnavailableIntNegativeValue(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockAsgName := "some-asg" - strategy := upgrademgrv1alpha1.UpdateStrategy{ - Type: upgrademgrv1alpha1.RandomUpdateStrategy, - MaxUnavailable: intstr.Parse("-1"), - } - - rcRollingUpgrade := createReconciler() - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: mockAsgName, - Strategy: strategy, - }, - } - - err := rcRollingUpgrade.validateRollingUpgradeObj(ruObj) - g.Expect(err.Error()).To(gomega.ContainSubstring("Invalid value for maxUnavailable")) -} - -func TestValidateruObjWithStrategyAndDrainTimeoutOnly(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockAsgName := "some-asg" - strategy := upgrademgrv1alpha1.UpdateStrategy{ - Type: upgrademgrv1alpha1.RandomUpdateStrategy, - } - - rcRollingUpgrade := createReconciler() - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: mockAsgName, - Strategy: strategy, - }, - } - - err := rcRollingUpgrade.validateRollingUpgradeObj(ruObj) - g.Expect(err.Error()).To(gomega.ContainSubstring("Invalid value for maxUnavailable")) -} - -func TestValidateruObjWithoutStrategyOnly(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - ruObj := upgrademgrv1alpha1.RollingUpgrade{} - - rcRollingUpgrade := createReconciler() - err := rcRollingUpgrade.validateRollingUpgradeObj(&ruObj) - - g.Expect(err).To(gomega.BeNil()) -} - -func TestValidateruObjStrategyType(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockAsgName := "some-asg" - strategy := upgrademgrv1alpha1.UpdateStrategy{ - Type: upgrademgrv1alpha1.RandomUpdateStrategy, - MaxUnavailable: intstr.Parse("10"), - } - - rcRollingUpgrade := createReconciler() - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: mockAsgName, - Strategy: strategy, - }, - } - - err := rcRollingUpgrade.validateRollingUpgradeObj(ruObj) - g.Expect(err).To(gomega.BeNil()) -} - -func TestValidateruObjInvalidStrategyType(t *testing.T) { - g := 
gomega.NewGomegaWithT(t) - - mockAsgName := "some-asg" - strategy := upgrademgrv1alpha1.UpdateStrategy{ - Type: "xyx", - MaxUnavailable: intstr.Parse("10"), - } - - rcRollingUpgrade := createReconciler() - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: mockAsgName, - Strategy: strategy, - }, - } - - err := rcRollingUpgrade.validateRollingUpgradeObj(ruObj) - g.Expect(err.Error()).To(gomega.ContainSubstring("Invalid value for strategy type")) -} - -func TestValidateruObjWithYaml(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - strategyYaml := ` -drainTimeout: 30 -maxUnavailable: 100% -type: randomUpdate -` - - mockAsgName := "some-asg" - strategy := upgrademgrv1alpha1.UpdateStrategy{} - err := yaml.Unmarshal([]byte(strategyYaml), &strategy) - if err != nil { - fmt.Printf("Error occurred while unmarshalling strategy yaml object, error: %s", err.Error()) - } - - rcRollingUpgrade := createReconciler() - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: mockAsgName, - Strategy: strategy, - }, - } - rcRollingUpgrade.setDefaultsForRollingUpdateStrategy(ruObj) - err = rcRollingUpgrade.validateRollingUpgradeObj(ruObj) - g.Expect(err).To(gomega.BeNil()) -} - -func TestSetDefaultsForRollingUpdateStrategy(t *testing.T) { - - g := gomega.NewGomegaWithT(t) - - mockAsgName := "some-asg" - strategy := upgrademgrv1alpha1.UpdateStrategy{} - - rcRollingUpgrade := createReconciler() - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: mockAsgName, - Strategy: strategy, - }, - } - rcRollingUpgrade.setDefaultsForRollingUpdateStrategy(ruObj) - - g.Expect(string(ruObj.Spec.Strategy.Type)).To(gomega.ContainSubstring(string(upgrademgrv1alpha1.RandomUpdateStrategy))) - g.Expect(ruObj.Spec.Strategy.DrainTimeout).To(gomega.Equal(-1)) - g.Expect(ruObj.Spec.Strategy.MaxUnavailable).To(gomega.Equal(intstr.IntOrString{Type: 0, IntVal: 1})) -} - -func TestValidateruObjStrategyAfterSettingDefaults(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockAsgName := "some-asg" - strategy := upgrademgrv1alpha1.UpdateStrategy{ - Type: upgrademgrv1alpha1.RandomUpdateStrategy, - } - - rcRollingUpgrade := createReconciler() - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: mockAsgName, - Strategy: strategy, - }, - } - rcRollingUpgrade.setDefaultsForRollingUpdateStrategy(ruObj) - err := rcRollingUpgrade.validateRollingUpgradeObj(ruObj) - - g.Expect(err).To(gomega.BeNil()) -} - -func TestValidateruObjStrategyAfterSettingDefaultsWithInvalidStrategyType(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockAsgName := "some-asg" - strategy := upgrademgrv1alpha1.UpdateStrategy{ - Type: "xyz", - } - - rcRollingUpgrade := createReconciler() - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: mockAsgName, - Strategy: strategy, - }, - } - rcRollingUpgrade.setDefaultsForRollingUpdateStrategy(ruObj) - error := rcRollingUpgrade.validateRollingUpgradeObj(ruObj) - - g.Expect(error).To(gomega.Not(gomega.BeNil())) -} - -func 
TestValidateruObjStrategyAfterSettingDefaultsWithOnlyDrainTimeout(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockAsgName := "some-asg" - strategy := upgrademgrv1alpha1.UpdateStrategy{ - Type: upgrademgrv1alpha1.RandomUpdateStrategy, - DrainTimeout: 15, - } - - rcRollingUpgrade := createReconciler() - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: mockAsgName, - Strategy: strategy, - }, - } - rcRollingUpgrade.setDefaultsForRollingUpdateStrategy(ruObj) - error := rcRollingUpgrade.validateRollingUpgradeObj(ruObj) - - g.Expect(error).To(gomega.BeNil()) -} - -func TestValidateruObjStrategyAfterSettingDefaultsWithOnlyMaxUnavailable(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockAsgName := "some-asg" - strategy := upgrademgrv1alpha1.UpdateStrategy{ - Type: upgrademgrv1alpha1.RandomUpdateStrategy, - MaxUnavailable: intstr.Parse("100%"), - } - - rcRollingUpgrade := createReconciler() - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: mockAsgName, - Strategy: strategy, - }, - } - rcRollingUpgrade.setDefaultsForRollingUpdateStrategy(ruObj) - error := rcRollingUpgrade.validateRollingUpgradeObj(ruObj) - - g.Expect(error).To(gomega.BeNil()) -} - -func TestRunRestackNoNodeInAsg(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - someAsg := "some-asg" - someLaunchConfig := "some-launch-config" - mockAsg := autoscaling.Group{AutoScalingGroupName: &someAsg, - LaunchConfigurationName: &someLaunchConfig, - Instances: []*autoscaling.Instance{}} - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: someAsg, - Strategy: upgrademgrv1alpha1.UpdateStrategy{Type: upgrademgrv1alpha1.RandomUpdateStrategy}, - }, - } - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - nodeList := corev1.NodeList{Items: []corev1.Node{}} - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: log2.NullLogger{}, - ASGClient: MockAutoscalingGroup{}, - EC2Client: MockEC2{}, - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - admissionMap: sync.Map{}, - NodeList: &nodeList, - ClusterState: NewClusterState(), - CacheConfig: cache.NewConfig(0*time.Second, 0, 0), - } - rcRollingUpgrade.admissionMap.Store(ruObj.NamespacedName(), "processing") - rcRollingUpgrade.ruObjNameToASG.Store(ruObj.NamespacedName(), &mockAsg) - - ctx := context.TODO() - - // This execution gets past the different launch config check, but since there is no node name, it is skipped - nodesProcessed, err := rcRollingUpgrade.runRestack(&ctx, ruObj) - g.Expect(nodesProcessed).To(gomega.Equal(0)) - g.Expect(err).To(gomega.BeNil()) -} - -func TestWaitForTermination(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - TerminationTimeoutSeconds = 1 - TerminationSleepIntervalSeconds = 1 - - mockNodeName := "node-123" - mockNode := &corev1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: mockNodeName, - }, - } - kuberenetesClient := fake.NewSimpleClientset() - nodeInterface := kuberenetesClient.CoreV1().Nodes() - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: log2.NullLogger{}, - generatedClient: 
kubernetes.NewForConfigOrDie(mgr.GetConfig()), - ClusterState: NewClusterState(), - } - _, err = nodeInterface.Create(mockNode) - g.Expect(err).NotTo(gomega.HaveOccurred()) - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - } - - unjoined, err := rcRollingUpgrade.WaitForTermination(ruObj, mockNodeName, nodeInterface) - g.Expect(err).NotTo(gomega.HaveOccurred()) - g.Expect(unjoined).To(gomega.BeFalse()) - - err = nodeInterface.Delete(mockNodeName, &metav1.DeleteOptions{}) - g.Expect(err).NotTo(gomega.HaveOccurred()) - - unjoined, err = rcRollingUpgrade.WaitForTermination(ruObj, mockNodeName, nodeInterface) - g.Expect(err).NotTo(gomega.HaveOccurred()) - g.Expect(unjoined).To(gomega.BeTrue()) -} - -func TestWaitForTerminationWhenNodeIsNotFound(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - TerminationTimeoutSeconds = 1 - TerminationSleepIntervalSeconds = 1 - - // nodeName is empty when a node is not found. - mockNodeName := "" - mockNode := &corev1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: mockNodeName, - }, - } - kuberenetesClient := fake.NewSimpleClientset() - nodeInterface := kuberenetesClient.CoreV1().Nodes() - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: log2.NullLogger{}, - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - ClusterState: NewClusterState(), - } - _, err = nodeInterface.Create(mockNode) - g.Expect(err).NotTo(gomega.HaveOccurred()) - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Namespace: "default", - }, - } - - unjoined, err := rcRollingUpgrade.WaitForTermination(ruObj, mockNodeName, nodeInterface) - g.Expect(unjoined).To(gomega.BeTrue()) - g.Expect(err).To(gomega.BeNil()) -} - -func buildManager() (manager.Manager, error) { - err := upgrademgrv1alpha1.AddToScheme(scheme.Scheme) - if err != nil { - return nil, err - } - return manager.New(cfg, manager.Options{MetricsBindAddress: "0"}) -} - -func TestRunRestackWithNodesLessThanMaxUnavailable(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - someAsg := "some-asg" - mockID := "some-id" - someLaunchConfig := "some-launch-config" - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: &mockID, LaunchConfigurationName: &someLaunchConfig, AvailabilityZone: &az} - mockAsg := autoscaling.Group{AutoScalingGroupName: &someAsg, - LaunchConfigurationName: &someLaunchConfig, - Instances: []*autoscaling.Instance{&mockInstance}} - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: someAsg, - Strategy: upgrademgrv1alpha1.UpdateStrategy{ - MaxUnavailable: intstr.IntOrString{Type: 0, IntVal: 2}, - Type: upgrademgrv1alpha1.RandomUpdateStrategy, - }, - }, - } - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: log2.NullLogger{}, - ASGClient: MockAutoscalingGroup{}, - EC2Client: MockEC2{}, - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - ClusterState: NewClusterState(), - CacheConfig: cache.NewConfig(0*time.Second, 0, 0), - } - rcRollingUpgrade.ruObjNameToASG.Store(ruObj.NamespacedName(), &mockAsg) - rcRollingUpgrade.ClusterState.deleteAllInstancesInAsg(someAsg) - ctx := context.TODO() - - // This execution should not perform 
drain or termination, but should pass - nodesProcessed, err := rcRollingUpgrade.runRestack(&ctx, ruObj) - g.Expect(err).To(gomega.BeNil()) - g.Expect(nodesProcessed).To(gomega.Equal(1)) -} - -func TestRequiresRefreshHandlesLaunchConfiguration(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - r := &RollingUpgradeReconciler{Log: log2.NullLogger{}} - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - - mockID := "some-id" - someLaunchConfig := "some-launch-config-v1" - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: &mockID, LaunchConfigurationName: &someLaunchConfig, AvailabilityZone: &az} - - newLaunchConfig := "some-launch-config-v2" - definition := launchDefinition{ - launchConfigurationName: &newLaunchConfig, - } - - result := r.requiresRefresh(ruObj, &mockInstance, &definition) - g.Expect(result).To(gomega.Equal(true)) -} - -func TestRequiresRefreshHandlesLaunchTemplateNameVersionUpdate(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockID := "some-id" - oldLaunchTemplate := &autoscaling.LaunchTemplateSpecification{ - LaunchTemplateName: aws.String("launch-template"), - Version: aws.String("1"), - } - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: &mockID, LaunchTemplate: oldLaunchTemplate, AvailabilityZone: &az} - - newLaunchTemplate := &autoscaling.LaunchTemplateSpecification{ - LaunchTemplateName: aws.String("launch-template"), - Version: aws.String("2"), - } - definition := launchDefinition{ - launchTemplate: newLaunchTemplate, - } - - r := &RollingUpgradeReconciler{Log: log2.NullLogger{}} - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - result := r.requiresRefresh(ruObj, &mockInstance, &definition) - g.Expect(result).To(gomega.Equal(true)) -} - -func TestRequiresRefreshHandlesLaunchTemplateIDVersionUpdate(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockID := "some-id" - oldLaunchTemplate := &autoscaling.LaunchTemplateSpecification{ - LaunchTemplateId: aws.String("launch-template-id-v1"), - Version: aws.String("1"), - } - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: &mockID, LaunchTemplate: oldLaunchTemplate, AvailabilityZone: &az} - - newLaunchTemplate := &autoscaling.LaunchTemplateSpecification{ - LaunchTemplateId: aws.String("launch-template-id-v1"), - Version: aws.String("2"), - } - definition := launchDefinition{ - launchTemplate: newLaunchTemplate, - } - r := &RollingUpgradeReconciler{Log: log2.NullLogger{}} - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - result := r.requiresRefresh(ruObj, &mockInstance, &definition) - g.Expect(result).To(gomega.Equal(true)) -} - -func TestRequiresRefreshHandlesLaunchTemplateNameUpdate(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockID := "some-id" - oldLaunchTemplate := &autoscaling.LaunchTemplateSpecification{ - LaunchTemplateName: aws.String("launch-template"), - Version: aws.String("1"), - } - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: &mockID, LaunchTemplate: oldLaunchTemplate, AvailabilityZone: &az} - - newLaunchTemplate := &autoscaling.LaunchTemplateSpecification{ - LaunchTemplateName: aws.String("launch-template-v2"), - Version: aws.String("1"), - } - definition := launchDefinition{ - launchTemplate: newLaunchTemplate, - } - r := &RollingUpgradeReconciler{Log: log2.NullLogger{}} - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: 
metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - result := r.requiresRefresh(ruObj, &mockInstance, &definition) - g.Expect(result).To(gomega.Equal(true)) -} - -func TestRequiresRefreshHandlesLaunchTemplateIDUpdate(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockID := "some-id" - oldLaunchTemplate := &autoscaling.LaunchTemplateSpecification{ - LaunchTemplateId: aws.String("launch-template-id-v1"), - Version: aws.String("1"), - } - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: &mockID, LaunchTemplate: oldLaunchTemplate, AvailabilityZone: &az} - - newLaunchTemplate := &autoscaling.LaunchTemplateSpecification{ - LaunchTemplateId: aws.String("launch-template-id-v2"), - Version: aws.String("1"), - } - definition := launchDefinition{ - launchTemplate: newLaunchTemplate, - } - r := &RollingUpgradeReconciler{Log: log2.NullLogger{}} - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - result := r.requiresRefresh(ruObj, &mockInstance, &definition) - g.Expect(result).To(gomega.Equal(true)) -} - -func TestRequiresRefreshNotUpdateIfNoVersionChange(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockID := "some-id" - instanceLaunchTemplate := &autoscaling.LaunchTemplateSpecification{ - LaunchTemplateId: aws.String("launch-template-id-v1"), - Version: aws.String("1"), - } - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: &mockID, LaunchTemplate: instanceLaunchTemplate, AvailabilityZone: &az} - - launchTemplate := &ec2.LaunchTemplate{ - LaunchTemplateId: aws.String("launch-template-id-v1"), - LatestVersionNumber: aws.Int64(1), - } - definition := launchDefinition{ - launchTemplate: instanceLaunchTemplate, - } - r := &RollingUpgradeReconciler{Log: log2.NullLogger{}} - r.LaunchTemplates = append(r.LaunchTemplates, launchTemplate) - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - result := r.requiresRefresh(ruObj, &mockInstance, &definition) - g.Expect(result).To(gomega.Equal(false)) -} - -func TestForceRefresh(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - // Even if launchtemplate is identical but forceRefresh is set, requiresRefresh should return true. 
- mockID := "some-id" - launchTemplate := &autoscaling.LaunchTemplateSpecification{ - LaunchTemplateId: aws.String("launch-template-id-v1"), - Version: aws.String("1"), - } - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: &mockID, LaunchTemplate: launchTemplate, AvailabilityZone: &az} - - definition := launchDefinition{ - launchTemplate: launchTemplate, - } - - ec2launchTemplate := &ec2.LaunchTemplate{ - LaunchTemplateId: aws.String("launch-template-id-v1"), - LatestVersionNumber: aws.Int64(1), - } - - r := &RollingUpgradeReconciler{Log: log2.NullLogger{}} - r.LaunchTemplates = append(r.LaunchTemplates, ec2launchTemplate) - currentTime := metav1.NewTime(metav1.Now().Time) - oldTime := metav1.NewTime(currentTime.Time.AddDate(0, 0, -1)) - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default", CreationTimestamp: currentTime}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - Strategy: upgrademgrv1alpha1.UpdateStrategy{DrainTimeout: -1}, - ForceRefresh: true, - }, - } - // If the node was created before the rollingupgrade object, requiresRefresh should return true - k8sNode := corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "k8sNode", CreationTimestamp: oldTime}, - Spec: corev1.NodeSpec{ProviderID: "fake-separator/" + mockID}} - nodeList := corev1.NodeList{Items: []corev1.Node{k8sNode}} - r.NodeList = &nodeList - result := r.requiresRefresh(ruObj, &mockInstance, &definition) - g.Expect(result).To(gomega.Equal(true)) - - // If the node was created at the same time as rollingupgrade object, requiresRefresh should return false - k8sNode.CreationTimestamp = currentTime - nodeList = corev1.NodeList{Items: []corev1.Node{k8sNode}} - r.NodeList = &nodeList - result = r.requiresRefresh(ruObj, &mockInstance, &definition) - g.Expect(result).To(gomega.Equal(false)) - - // Reset the timestamp on the k8s node - k8sNode.CreationTimestamp = oldTime - nodeList = corev1.NodeList{Items: []corev1.Node{k8sNode}} - r.NodeList = &nodeList - - // If launchTempaltes are different and forceRefresh is true, requiresRefresh should return true - newLaunchTemplate := &autoscaling.LaunchTemplateSpecification{ - LaunchTemplateId: aws.String("launch-template-id-v1"), - Version: aws.String("1"), - } - - definition = launchDefinition{ - launchTemplate: newLaunchTemplate, - } - result = r.requiresRefresh(ruObj, &mockInstance, &definition) - g.Expect(result).To(gomega.Equal(true)) - - // If launchTemplares are identical AND forceRefresh is false, requiresRefresh should return false - ruObj.Spec.ForceRefresh = false - result = r.requiresRefresh(ruObj, &mockInstance, &definition) - g.Expect(result).To(gomega.Equal(false)) - - // If launchConfigs are identical but forceRefresh is true, requiresRefresh should return true - ruObj.Spec.ForceRefresh = true - launchConfig := "launch-config" - mockInstance = autoscaling.Instance{InstanceId: &mockID, LaunchConfigurationName: &launchConfig, AvailabilityZone: &az} - definition = launchDefinition{ - launchConfigurationName: &launchConfig, - } - result = r.requiresRefresh(ruObj, &mockInstance, &definition) - g.Expect(result).To(gomega.Equal(true)) - - // If launchConfigs are identical AND forceRefresh is false, requiresRefresh should return false - ruObj.Spec.ForceRefresh = false - result = r.requiresRefresh(ruObj, &mockInstance, &definition) - g.Expect(result).To(gomega.Equal(false)) -} - -func TestDrainNodeTerminateTerminatesWhenIgnoreDrainFailuresSet(t *testing.T) { - g := gomega.NewGomegaWithT(t) - mockNode := 
"some-node-name" - - mockKubeCtlCall := "" - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - Strategy: upgrademgrv1alpha1.UpdateStrategy{DrainTimeout: -1}, - IgnoreDrainFailures: true, - PreDrain: upgrademgrv1alpha1.PreDrainSpec{ - Script: mockKubeCtlCall, - }, - }, - } - rcRollingUpgrade := &RollingUpgradeReconciler{ - ClusterState: NewClusterState(), - Log: log2.NullLogger{}, - EC2Client: MockEC2{}, - ASGClient: MockAutoscalingGroup{ - errorFlag: false, - awsErr: nil, - }, - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - } - - err := rcRollingUpgrade.DrainTerminate(ruObj, mockNode, mockNode, nodeSteps, inProcessingNodes, mutex) - g.Expect(err).To(gomega.BeNil()) // don't expect errors. - g.Expect(ruObj.Status.LastNodeDrainTime).ShouldNot(gomega.BeNil()) - g.Expect(ruObj.Status.LastNodeTerminationTime).ShouldNot(gomega.BeNil()) - - // nodeName is empty when node isn't part of the cluster. It must skip drain and terminate. - err = rcRollingUpgrade.DrainTerminate(ruObj, "", mockNode, nodeSteps, inProcessingNodes, mutex) - g.Expect(err).To(gomega.BeNil()) // don't expect errors. - -} - -func TestPreDrainFailureWhenIgnoreDrainFailuresSet(t *testing.T) { - g := gomega.NewGomegaWithT(t) - mockNode := "some-node-name" - - mockKubeCtlCall := "exit 1;" - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - Strategy: upgrademgrv1alpha1.UpdateStrategy{DrainTimeout: -1}, - IgnoreDrainFailures: true, - PreDrain: upgrademgrv1alpha1.PreDrainSpec{ - Script: mockKubeCtlCall, - }, - }, - } - rcRollingUpgrade := &RollingUpgradeReconciler{ - ClusterState: NewClusterState(), - Log: log2.NullLogger{}, - EC2Client: MockEC2{}, - ASGClient: MockAutoscalingGroup{ - errorFlag: false, - awsErr: nil, - }, - ScriptRunner: NewScriptRunner(log2.NullLogger{}), - } - - err := rcRollingUpgrade.DrainTerminate(ruObj, mockNode, mockNode, nodeSteps, inProcessingNodes, mutex) - g.Expect(err).To(gomega.Not(gomega.BeNil())) // expect error, as the predrain failure shouldn't be masked when IgnoreDrainFailures is set. 
-} -func TestUpdateInstancesNotExists(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - someAsg := "some-asg" - mockID := "some-id" - someLaunchConfig := "some-launch-config" - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: &mockID, LaunchConfigurationName: &someLaunchConfig, AvailabilityZone: &az} - mockAsg := autoscaling.Group{AutoScalingGroupName: &someAsg, - LaunchConfigurationName: &someLaunchConfig, - Instances: []*autoscaling.Instance{&mockInstance}} - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: someAsg, - Strategy: upgrademgrv1alpha1.UpdateStrategy{ - Mode: "lazy", - }, - }, - } - - mgr, _ := buildManager() - client := mgr.GetClient() - fooNode := corev1.Node{Spec: corev1.NodeSpec{ProviderID: "foo-bar/9213851"}} - - nodeList := corev1.NodeList{Items: []corev1.Node{fooNode}} - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: client, - Log: log2.NullLogger{}, - ASGClient: MockAutoscalingGroup{}, - EC2Client: MockEC2{}, - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - NodeList: &nodeList, - ClusterState: NewClusterState(), - CacheConfig: cache.NewConfig(0*time.Second, 0, 0), - } - rcRollingUpgrade.ruObjNameToASG.Store(ruObj.NamespacedName(), &mockAsg) - rcRollingUpgrade.ScriptRunner.KubectlCall = "date" - - // Intentionally do not populate the admissionMap with the ruObj - - ctx := context.TODO() - lcName := "A" - instChan := make(chan error) - mockInstanceName1 := "foo1" - instance1 := autoscaling.Instance{InstanceId: &mockInstanceName1, AvailabilityZone: &az} - go rcRollingUpgrade.UpdateInstance(&ctx, ruObj, &instance1, &launchDefinition{launchConfigurationName: &lcName}, instChan, nodeSteps, inProcessingNodes, mutex) - processCount := 0 - select { - case <-ctx.Done(): - break - case err := <-instChan: - if err == nil { - processCount++ - } - break - } - - g.Expect(processCount).To(gomega.Equal(1)) -} - -func TestValidateNodesLaunchDefinitionSameLaunchConfig(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - someLaunchConfig := "some-launch-config" - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: aws.String("some-id"), LifecycleState: aws.String("InService"), LaunchConfigurationName: &someLaunchConfig, AvailabilityZone: &az} - mockAsg := &autoscaling.Group{ - AutoScalingGroupName: aws.String("my-asg"), - LaunchConfigurationName: &someLaunchConfig, - Instances: []*autoscaling.Instance{&mockInstance}, - } - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{AsgName: "my-asg"}} - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - mockAsgClient := MockAutoscalingGroup{ - autoScalingGroups: []*autoscaling.Group{mockAsg}, - } - - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: log2.NullLogger{}, - ASGClient: mockAsgClient, - EC2Client: MockEC2{}, - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - ClusterState: NewClusterState(), - CacheConfig: cache.NewConfig(0*time.Second, 0, 0), - } - rcRollingUpgrade.admissionMap.Store(ruObj.NamespacedName(), "processing") - rcRollingUpgrade.ruObjNameToASG.Store(ruObj.NamespacedName(), mockAsg) - - err = rcRollingUpgrade.validateNodesLaunchDefinition(ruObj) - g.Expect(err).To(gomega.BeNil()) -} - -func TestValidateNodesLaunchDefinitionDifferentLaunchConfig(t *testing.T) { - g := 
gomega.NewGomegaWithT(t) - - someLaunchConfig := "some-launch-config" - someOtherLaunchConfig := "some-other-launch-config" - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: aws.String("some-id"), LifecycleState: aws.String("InService"), LaunchConfigurationName: &someLaunchConfig, AvailabilityZone: &az} - mockAsg := &autoscaling.Group{ - AutoScalingGroupName: aws.String("my-asg"), - LaunchConfigurationName: &someOtherLaunchConfig, - Instances: []*autoscaling.Instance{&mockInstance}} - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{AsgName: "my-asg"}} - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - mockAsgClient := MockAutoscalingGroup{ - autoScalingGroups: []*autoscaling.Group{mockAsg}, - } - - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: log2.NullLogger{}, - ASGClient: mockAsgClient, - EC2Client: MockEC2{}, - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - ClusterState: NewClusterState(), - CacheConfig: cache.NewConfig(0*time.Second, 0, 0), - } - rcRollingUpgrade.admissionMap.Store(ruObj.NamespacedName(), "processing") - rcRollingUpgrade.ruObjNameToASG.Store(ruObj.NamespacedName(), mockAsg) - - err = rcRollingUpgrade.validateNodesLaunchDefinition(ruObj) - g.Expect(err).To(gomega.Not(gomega.BeNil())) -} - -func TestValidateNodesLaunchDefinitionSameLaunchTemplate(t *testing.T) { - g := gomega.NewGomegaWithT(t) - someLaunchTemplate := &autoscaling.LaunchTemplateSpecification{LaunchTemplateId: aws.String("launch-template-id-v1")} - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: aws.String("some-id"), LifecycleState: aws.String("InService"), LaunchTemplate: someLaunchTemplate, AvailabilityZone: &az} - mockAsg := &autoscaling.Group{ - AutoScalingGroupName: aws.String("my-asg"), - LaunchTemplate: someLaunchTemplate, - Instances: []*autoscaling.Instance{&mockInstance}} - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{AsgName: "my-asg"}} - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - mockAsgClient := MockAutoscalingGroup{ - autoScalingGroups: []*autoscaling.Group{mockAsg}, - } - - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: log2.NullLogger{}, - ASGClient: mockAsgClient, - EC2Client: MockEC2{}, - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - ClusterState: NewClusterState(), - CacheConfig: cache.NewConfig(0*time.Second, 0, 0), - } - rcRollingUpgrade.admissionMap.Store(ruObj.NamespacedName(), "processing") - rcRollingUpgrade.ruObjNameToASG.Store(ruObj.NamespacedName(), mockAsg) - - err = rcRollingUpgrade.validateNodesLaunchDefinition(ruObj) - g.Expect(err).To(gomega.BeNil()) -} - -func TestValidateNodesLaunchDefinitionDifferentLaunchTemplate(t *testing.T) { - g := gomega.NewGomegaWithT(t) - someLaunchTemplate := &autoscaling.LaunchTemplateSpecification{LaunchTemplateId: aws.String("launch-template-id-v1")} - someOtherLaunchTemplate := &autoscaling.LaunchTemplateSpecification{LaunchTemplateId: aws.String("launch-template-id-v2")} - az := "az-1" - mockInstance := autoscaling.Instance{InstanceId: aws.String("some-id"), LifecycleState: aws.String("InService"), LaunchTemplate: someLaunchTemplate, AvailabilityZone: &az} - mockAsg := &autoscaling.Group{ - AutoScalingGroupName: 
aws.String("my-asg"), - LaunchTemplate: someOtherLaunchTemplate, - Instances: []*autoscaling.Instance{&mockInstance}} - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{AsgName: "my-asg"}} - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - mockAsgClient := MockAutoscalingGroup{ - autoScalingGroups: []*autoscaling.Group{mockAsg}, - } - - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: log2.NullLogger{}, - ASGClient: mockAsgClient, - EC2Client: MockEC2{}, - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - ClusterState: NewClusterState(), - CacheConfig: cache.NewConfig(0*time.Second, 0, 0), - } - rcRollingUpgrade.admissionMap.Store(ruObj.NamespacedName(), "processing") - rcRollingUpgrade.ruObjNameToASG.Store(ruObj.NamespacedName(), mockAsg) - - err = rcRollingUpgrade.validateNodesLaunchDefinition(ruObj) - g.Expect(err).To(gomega.Not(gomega.BeNil())) -} - -func TestValidateNodesLaunchDefinitionMixedInstanceState(t *testing.T) { - g := gomega.NewGomegaWithT(t) - someLaunchTemplate := &autoscaling.LaunchTemplateSpecification{LaunchTemplateId: aws.String("launch-template-id-v1")} - someOtherLaunchTemplate := &autoscaling.LaunchTemplateSpecification{LaunchTemplateId: aws.String("launch-template-id-v2")} - az := "az-1" - mockInstance1 := autoscaling.Instance{InstanceId: aws.String("some-id-1"), LifecycleState: aws.String("InService"), LaunchTemplate: someLaunchTemplate, AvailabilityZone: &az} - mockInstance2 := autoscaling.Instance{InstanceId: aws.String("some-id-2"), LifecycleState: aws.String("Pending"), LaunchTemplate: someOtherLaunchTemplate, AvailabilityZone: &az} - mockInstance3 := autoscaling.Instance{InstanceId: aws.String("some-id-3"), LifecycleState: aws.String("Terminating"), LaunchTemplate: someOtherLaunchTemplate, AvailabilityZone: &az} - mockInstance4 := autoscaling.Instance{InstanceId: aws.String("some-id-4"), LifecycleState: aws.String("InService"), LaunchTemplate: someLaunchTemplate, AvailabilityZone: &az} - mockAsg := &autoscaling.Group{ - AutoScalingGroupName: aws.String("my-asg"), - LaunchTemplate: someLaunchTemplate, - Instances: []*autoscaling.Instance{&mockInstance1, &mockInstance2, &mockInstance3, &mockInstance4}} - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{AsgName: "my-asg"}} - - mgr, err := buildManager() - g.Expect(err).NotTo(gomega.HaveOccurred()) - - mockAsgClient := MockAutoscalingGroup{ - autoScalingGroups: []*autoscaling.Group{mockAsg}, - } - - rcRollingUpgrade := &RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: log2.NullLogger{}, - ASGClient: mockAsgClient, - EC2Client: MockEC2{}, - generatedClient: kubernetes.NewForConfigOrDie(mgr.GetConfig()), - ClusterState: NewClusterState(), - CacheConfig: cache.NewConfig(0*time.Second, 0, 0), - } - rcRollingUpgrade.admissionMap.Store(ruObj.NamespacedName(), "processing") - rcRollingUpgrade.ruObjNameToASG.Store(ruObj.NamespacedName(), mockAsg) - - err = rcRollingUpgrade.validateNodesLaunchDefinition(ruObj) - g.Expect(err).To((gomega.BeNil())) -} diff --git a/controllers/rollup_cluster_state.go b/controllers/rollup_cluster_state.go deleted file mode 100644 index 64167f55..00000000 --- a/controllers/rollup_cluster_state.go +++ /dev/null @@ -1,176 +0,0 @@ -/* -Copyright 2019 Intuit, Inc.. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "sync" - - "github.com/aws/aws-sdk-go/service/autoscaling" -) - -const ( - // updateStarted indicates that the update process has been started for an instance - updateInitialized = "new" - // updateInProgress indicates that update has been triggered for an instance - updateInProgress = "in-progress" - // updateCompleted indicates that update is completed for an instance - updateCompleted = "completed" -) - -// ClusterState contains the methods to store the instance states during a cluster update -type ClusterState interface { - markUpdateInitialized(instanceId string) - markUpdateInProgress(instanceId string) - markUpdateCompleted(instanceId string) - instanceUpdateInitialized(instanceId string) bool - instanceUpdateInProgress(instanceId string) bool - instanceUpdateCompleted(instanceId string) bool - deleteAllInstancesInAsg(asgName string) bool - getNextAvailableInstanceIdInAz(asgName string, azName string) string - initializeAsg(asgName string, instances []*autoscaling.Instance) - addInstanceState(instanceData *InstanceData) - updateInstanceState(instanceId, instanceState string) - getInstanceState(instanceId string) string -} - -type InstanceData struct { - Id string - AmiId string - AsgName string - AzName string - InstanceState string -} - -// ClusterStateImpl implements the ClusterState interface -type ClusterStateImpl struct { - mu sync.RWMutex - - // store stores the state of the instances running in different Azs for multiple ASGs - store sync.Map -} - -// newClusterState returns the object the struct implementing the ClusterState interface -func NewClusterState() ClusterState { - return &ClusterStateImpl{} -} - -// markUpdateInProgress updates the instance state to in-progress -func (c *ClusterStateImpl) markUpdateInProgress(instanceId string) { - c.updateInstanceState(instanceId, updateInProgress) -} - -// markUpdateCompleted updates the instance state to completed iff it is in-progress -func (c *ClusterStateImpl) markUpdateCompleted(instanceId string) { - c.mu.Lock() - defer c.mu.Unlock() - - if c.instanceUpdateInProgress(instanceId) { - c.updateInstanceState(instanceId, updateCompleted) - } -} - -// instanceUpdateInProgress returns true if the instance update is in progress -func (c *ClusterStateImpl) instanceUpdateInProgress(instanceId string) bool { - return c.getInstanceState(instanceId) == updateInProgress -} - -// instanceUpdateCompleted returns true if the instance update is completed -func (c *ClusterStateImpl) instanceUpdateCompleted(instanceId string) bool { - return c.getInstanceState(instanceId) == updateCompleted -} - -// deleteEntryOfAsg deletes the entry for an ASG in the cluster state map -func (c *ClusterStateImpl) deleteAllInstancesInAsg(asgName string) bool { - deleted := false - c.store.Range(func(key interface{}, value interface{}) bool { - instanceID, _ := key.(string) - instanceData, _ := value.(*InstanceData) - if instanceData.AsgName == asgName { - c.store.Delete(instanceID) - deleted = 
true - } - return true - }) - return deleted -} - -// markUpdateInitialized updates the instance state to in-progresss -func (c *ClusterStateImpl) markUpdateInitialized(instanceId string) { - c.updateInstanceState(instanceId, updateInitialized) -} - -// instanceUpdateInitialized returns true if the instance update is in progress -func (c *ClusterStateImpl) instanceUpdateInitialized(instanceId string) bool { - return c.getInstanceState(instanceId) == updateInitialized -} - -// initializeAsg adds an entry for all the instances in an ASG with updateInitialized state -func (c *ClusterStateImpl) initializeAsg(asgName string, instances []*autoscaling.Instance) { - for _, instance := range instances { - instanceData := &InstanceData{ - Id: *instance.InstanceId, - AzName: *instance.AvailabilityZone, - AsgName: asgName, - InstanceState: updateInitialized, - } - c.addInstanceState(instanceData) - } -} - -// getNextAvailableInstanceId returns the id of the next instance available for update in an ASG -// adding a mutex to avoid the race conditions and same instance returned for 2 go-routines -func (c *ClusterStateImpl) getNextAvailableInstanceIdInAz(asgName string, azName string) string { - c.mu.Lock() - defer c.mu.Unlock() - - instanceId := "" - c.store.Range(func(key interface{}, value interface{}) bool { - state, _ := value.(*InstanceData) - if state.AsgName == asgName && - (azName == "" || state.AzName == azName) && - state.InstanceState == updateInitialized { - c.markUpdateInProgress(state.Id) - instanceId = state.Id - return false - } - return true - }) - return instanceId -} - -// updateInstanceState updates the state of the instance in cluster store -func (c *ClusterStateImpl) addInstanceState(instanceData *InstanceData) { - c.store.Store(instanceData.Id, instanceData) -} - -// updateInstanceState updates the state of the instance in cluster store -func (c *ClusterStateImpl) updateInstanceState(instanceId, instanceState string) { - if val, ok := c.store.Load(instanceId); ok { - instanceData, _ := val.(*InstanceData) - instanceData.InstanceState = instanceState - c.store.Store(instanceId, instanceData) - } -} - -// getInstanceState returns the state of the instance from cluster store -func (c *ClusterStateImpl) getInstanceState(instanceId string) string { - if val, ok := c.store.Load(instanceId); ok { - state, _ := val.(*InstanceData) - return state.InstanceState - } - return "" -} diff --git a/controllers/rollup_cluster_state_test.go b/controllers/rollup_cluster_state_test.go deleted file mode 100644 index 6e3f4c05..00000000 --- a/controllers/rollup_cluster_state_test.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright 2019 Intuit, Inc.. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controllers - -import ( - "testing" - - "github.com/aws/aws-sdk-go/service/autoscaling" - "github.com/onsi/gomega" -) - -var clusterState = NewClusterState() - -func TestMarkUpdateInProgress(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - populateClusterState() - mockNodeName := "instance-1" - clusterState.markUpdateInProgress(mockNodeName) - - g.Expect(clusterState.instanceUpdateInProgress(mockNodeName)).To(gomega.BeTrue()) -} - -func TestMarkUpdateCompleted(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - populateClusterState() - mockNodeName := "instance-1" - clusterState.markUpdateInProgress(mockNodeName) - clusterState.markUpdateCompleted(mockNodeName) - - g.Expect(clusterState.instanceUpdateCompleted(mockNodeName)).To(gomega.BeTrue()) -} - -func TestMarkUpdateInitialized(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - populateClusterState() - mockNodeName := "instance-1" - clusterState.markUpdateInitialized(mockNodeName) - - g.Expect(clusterState.instanceUpdateInitialized(mockNodeName)).To(gomega.BeTrue()) -} - -func TestInstanceUpdateInitialized(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - mockNodeName := "instance-3" - g.Expect(clusterState.instanceUpdateInitialized(mockNodeName)).To(gomega.BeFalse()) -} - -func TestInstanceUpdateInProgress(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - populateClusterState() - mockNodeName := "instance-2" - - g.Expect(clusterState.instanceUpdateInProgress(mockNodeName)).To(gomega.BeFalse()) -} - -func TestInstanceUpdateCompleted(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - populateClusterState() - mockNodeName := "instance-1" - - g.Expect(clusterState.instanceUpdateCompleted(mockNodeName)).To(gomega.BeFalse()) -} - -func TestUpdateInstanceState(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - populateClusterState() - mockNodeName := "instance-1" - mockInstanceState := "to-be-updated" - clusterState.updateInstanceState(mockNodeName, mockInstanceState) - - g.Expect(clusterState.getInstanceState(mockNodeName)).To(gomega.ContainSubstring(mockInstanceState)) -} - -func TestInitializeAsg(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - populateClusterState() - - instanceIds := []string{"instance-1", "instance-2"} - for _, instance := range instanceIds { - g.Expect(clusterState.instanceUpdateInitialized(instance)).To(gomega.BeTrue()) - } -} - -func TestDeleteEntryOfAsg(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - populateClusterState() - mockAsgName := "asg-1" - - g.Expect(clusterState.deleteAllInstancesInAsg(mockAsgName)).To(gomega.BeTrue()) - g.Expect(clusterState.deleteAllInstancesInAsg(mockAsgName)).To(gomega.BeFalse()) -} - -func TestInstanceStateUpdateSequence(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - populateClusterState() - mockAsgName := "asg-1" - mockNodeName := "instance-1" - clusterState.markUpdateInitialized(mockNodeName) - g.Expect(clusterState.instanceUpdateInitialized(mockNodeName)).To(gomega.BeTrue()) - clusterState.markUpdateInProgress(mockNodeName) - g.Expect(clusterState.instanceUpdateInProgress(mockNodeName)).To(gomega.BeTrue()) - clusterState.markUpdateCompleted(mockNodeName) - g.Expect(clusterState.instanceUpdateCompleted(mockNodeName)).To(gomega.BeTrue()) - - g.Expect(clusterState.deleteAllInstancesInAsg(mockAsgName)).To(gomega.BeTrue()) - g.Expect(clusterState.deleteAllInstancesInAsg(mockAsgName)).To(gomega.BeFalse()) -} - -func TestGetNextAvailableInstanceIdInAz(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - populateClusterState() - mockAsgName := "asg-1" - 
mockInstance1 := "instance-1"
-	mockInstance2 := "instance-2"
-
-	clusterState.markUpdateInProgress(mockInstance1)
-
-	g.Expect(clusterState.getNextAvailableInstanceIdInAz(mockAsgName, "az-1")).To(gomega.ContainSubstring(mockInstance2))
-
-	g.Expect(clusterState.getNextAvailableInstanceIdInAz(mockAsgName, "az-2")).To(gomega.ContainSubstring(""))
-}
-
-func populateClusterState() {
-	asgName := "asg-1"
-	instance1 := "instance-1"
-	instance2 := "instance-2"
-	az := "az-1"
-	instances := []*autoscaling.Instance{}
-	instances = append(instances, &autoscaling.Instance{InstanceId: &instance1, AvailabilityZone: &az})
-	instances = append(instances, &autoscaling.Instance{InstanceId: &instance2, AvailabilityZone: &az})
-	clusterState.initializeAsg(asgName, instances)
-}
diff --git a/controllers/script_runner.go b/controllers/script_runner.go
index aced7ede..fe51a571 100644
--- a/controllers/script_runner.go
+++ b/controllers/script_runner.go
@@ -1,12 +1,9 @@
 /*
 Copyright 2019 Intuit, Inc..
-
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
-
     http://www.apache.org/licenses/LICENSE-2.0
-
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -18,167 +15,119 @@ package controllers
 
 import (
 	"fmt"
-	"github.com/go-logr/logr"
-	upgrademgrv1alpha1 "github.com/keikoproj/upgrade-manager/api/v1alpha1"
 	"os"
 	"os/exec"
-	"strings"
+
+	"github.com/go-logr/logr"
+	"github.com/keikoproj/upgrade-manager/api/v1alpha1"
 )
 
 const (
-	// KubeCtlBinary is the path to the kubectl executable
-	KubeCtlBinary = "/usr/local/bin/kubectl"
-	// ShellBinary is the path to the shell executable
 	ShellBinary = "/bin/sh"
 )
 
 type ScriptRunner struct {
-	Log         logr.Logger
-	KubectlCall string
+	logr.Logger
+}
+
+type ScriptTarget struct {
+	InstanceID    string
+	NodeName     string
+	UpgradeObject *v1alpha1.RollingUpgrade
 }
 
 func NewScriptRunner(logger logr.Logger) ScriptRunner {
 	return ScriptRunner{
-		Log:         logger,
-		KubectlCall: KubeCtlBinary,
+		Logger: logger,
 	}
 }
 
-func (r *ScriptRunner) uncordonNode(nodeName string, ruObj *upgrademgrv1alpha1.RollingUpgrade) (string, error) {
-	script := fmt.Sprintf("%s uncordon %s", r.KubectlCall, nodeName)
-	return r.runScript(script, false, ruObj)
-}
-
-func (r *ScriptRunner) drainNode(nodeName string, ruObj *upgrademgrv1alpha1.RollingUpgrade) (string, error) {
-	// kops behavior implements the same behavior by using these flags when draining nodes
-	// https://github.com/kubernetes/kops/blob/7a629c77431dda02d02aadf00beb0bed87518cbf/pkg/instancegroups/instancegroups.go lines 337-340
-	script := fmt.Sprintf("%s drain %s --ignore-daemonsets=true --delete-local-data=true --force --grace-period=-1", r.KubectlCall, nodeName)
-	return r.runScript(script, false, ruObj)
+func (r *ScriptRunner) getEnv(target ScriptTarget) []string {
+	var (
+		asgNameEnv      = "ASG_NAME"
+		instanceIdEnv   = "INSTANCE_ID"
+		instanceNameEnv = "INSTANCE_NAME"
+	)
+	return []string{
+		fmt.Sprintf("%s=%s", asgNameEnv, target.UpgradeObject.ScalingGroupName()),
+		fmt.Sprintf("%s=%s", instanceIdEnv, target.InstanceID),
+		fmt.Sprintf("%s=%s", instanceNameEnv, target.NodeName),
+	}
 }
 
-func (r *ScriptRunner) runScriptWithEnv(script string, background bool, ruObj *upgrademgrv1alpha1.RollingUpgrade, env []string) (string, error) {
-	r.info(ruObj, "Running script", "script", script)
+func (r *ScriptRunner) runScript(script string, target ScriptTarget) (string, error) { + r.Info("running script", "script", script, "name", target.UpgradeObject.NamespacedName()) command := exec.Command(ShellBinary, "-c", script) - command.Env = append(os.Environ(), env...) - - if background { - r.info(ruObj, "Running script in background. Logs not available.") - err := command.Run() - if err != nil { - r.info(ruObj, fmt.Sprintf("Script finished with error: %s", err)) - } - - return "", nil - } + command.Env = append(os.Environ(), r.getEnv(target)...) out, err := command.CombinedOutput() if err != nil { - r.error(ruObj, err, "Script finished", "output", string(out)) - } else { - r.info(ruObj, "Script finished", "output", string(out)) + return string(out), err } - return string(out), err + return string(out), nil } -func (r *ScriptRunner) runScript(script string, background bool, ruObj *upgrademgrv1alpha1.RollingUpgrade) (string, error) { - return r.runScriptWithEnv(script, background, ruObj, nil) +func (r *ScriptRunner) PostTerminate(target ScriptTarget) error { + script := target.UpgradeObject.PostTerminateScript() + if script == "" { + return nil + } -} + out, err := r.runScript(script, target) + if err != nil { + r.Info("script execution failed", "output", out, "stage", "PostTerminate", "script", script, "name", target.UpgradeObject.NamespacedName(), "target", target.NodeName) + return err + } -// logger creates logger for rolling upgrade. -func (r *ScriptRunner) logger(ruObj *upgrademgrv1alpha1.RollingUpgrade) logr.Logger { - return r.Log.WithValues("rollingupgrade", ruObj.Name) -} + r.Info("script execution succeeded", "output", out, "stage", "PostTerminate", "script", script, "name", target.UpgradeObject.NamespacedName(), "target", target.NodeName) -// info logs message with Info level for the specified rolling upgrade. -func (r *ScriptRunner) info(ruObj *upgrademgrv1alpha1.RollingUpgrade, msg string, keysAndValues ...interface{}) { - r.logger(ruObj).Info(msg, keysAndValues...) + return nil } -// error logs message with Error level for the specified rolling upgrade. -func (r *ScriptRunner) error(ruObj *upgrademgrv1alpha1.RollingUpgrade, err error, msg string, keysAndValues ...interface{}) { - r.logger(ruObj).Error(err, msg, keysAndValues...) -} +func (r *ScriptRunner) PreDrain(target ScriptTarget) error { + script := target.UpgradeObject.PreDrainScript() + if script == "" { + return nil + } -func (r *ScriptRunner) PostTerminate(instanceID string, nodeName string, ruObj *upgrademgrv1alpha1.RollingUpgrade) error { - if ruObj.Spec.PostTerminate.Script != "" { - - out, err := r.runScriptWithEnv(ruObj.Spec.PostTerminate.Script, false, ruObj, r.buildEnv(ruObj, instanceID, nodeName)) - if err != nil { - if strings.HasPrefix(out, "Error from server (NotFound)") { - r.error(ruObj, err, "Node not found when running postTerminate. 
Ignoring ...", "output", out, "instanceID", instanceID) - return nil - } - msg := "Failed to run postTerminate script" - r.error(ruObj, err, msg, "instanceID", instanceID) - return fmt.Errorf("%s: %s: %w", ruObj.NamespacedName(), msg, err) - } + out, err := r.runScript(script, target) + if err != nil { + r.Info("script execution failed", "output", out, "stage", "PreDrain", "script", script, "name", target.UpgradeObject.NamespacedName(), "target", target.NodeName) + return err } - return nil + r.Info("script execution succeeded", "output", out, "stage", "PreDrain", "script", script, "name", target.UpgradeObject.NamespacedName(), "target", target.NodeName) + return nil } -func (r *ScriptRunner) PreDrain(instanceID string, nodeName string, ruObj *upgrademgrv1alpha1.RollingUpgrade) error { - if ruObj.Spec.PreDrain.Script != "" { - script := ruObj.Spec.PreDrain.Script - _, err := r.runScriptWithEnv(script, false, ruObj, r.buildEnv(ruObj, instanceID, nodeName)) - if err != nil { - msg := "Failed to run preDrain script" - r.error(ruObj, err, msg) - return fmt.Errorf("%s: %s: %w", ruObj.NamespacedName(), msg, err) - } +func (r *ScriptRunner) PostDrain(target ScriptTarget) error { + script := target.UpgradeObject.PostDrainScript() + if script == "" { + return nil } - return nil -} -func (r *ScriptRunner) PostWait(instanceID string, nodeName string, ruObj *upgrademgrv1alpha1.RollingUpgrade) error { - if ruObj.Spec.PostDrain.PostWaitScript != "" { - _, err := r.runScriptWithEnv(ruObj.Spec.PostDrain.PostWaitScript, false, ruObj, r.buildEnv(ruObj, instanceID, nodeName)) - if err != nil { - msg := "Failed to run postDrainWait script: " + err.Error() - r.error(ruObj, err, msg) - result := fmt.Errorf("%s: %s: %w", ruObj.NamespacedName(), msg, err) - - if !ruObj.Spec.IgnoreDrainFailures { - r.info(ruObj, "Uncordoning the node since it failed to run postDrainWait Script", "nodeName", nodeName) - _, err = r.uncordonNode(nodeName, ruObj) - if err != nil { - r.error(ruObj, err, "Failed to uncordon", "nodeName", nodeName) - } - } - - return result - } + out, err := r.runScript(script, target) + if err != nil { + r.Info("script execution failed", "output", out, "stage", "PostDrain", "script", script, "name", target.UpgradeObject.NamespacedName(), "target", target.NodeName) + return err } + + r.Info("script execution succeeded", "output", out, "stage", "PostDrain", "script", script, "name", target.UpgradeObject.NamespacedName(), "target", target.NodeName) return nil } -func (r *ScriptRunner) PostDrain(instanceID string, nodeName string, ruObj *upgrademgrv1alpha1.RollingUpgrade) error { - if ruObj.Spec.PostDrain.Script != "" { - _, err := r.runScriptWithEnv(ruObj.Spec.PostDrain.Script, false, ruObj, r.buildEnv(ruObj, instanceID, nodeName)) - if err != nil { - msg := "Failed to run postDrain script: " - r.error(ruObj, err, msg) - result := fmt.Errorf("%s: %s: %w", ruObj.NamespacedName(), msg, err) - - if !ruObj.Spec.IgnoreDrainFailures { - r.info(ruObj, "Uncordoning the node since it failed to run postDrain Script", "nodeName", nodeName) - _, err = r.uncordonNode(nodeName, ruObj) - if err != nil { - r.error(ruObj, err, "Failed to uncordon", "nodeName", nodeName) - } - } - - return result - } +func (r *ScriptRunner) PostWait(target ScriptTarget) error { + script := target.UpgradeObject.PostWaitScript() + if script == "" { + return nil } - return nil -} -func (r *ScriptRunner) buildEnv(ruObj *upgrademgrv1alpha1.RollingUpgrade, instanceID string, nodeName string) []string { - return []string{ - fmt.Sprintf("%s=%s", 
asgNameKey, ruObj.Spec.AsgName), - fmt.Sprintf("%s=%s", instanceIDKey, instanceID), - fmt.Sprintf("%s=%s", instanceNameKey, nodeName), + out, err := r.runScript(script, target) + if err != nil { + r.Info("script execution failed", "output", out, "stage", "PostWait", "script", script, "name", target.UpgradeObject.NamespacedName(), "target", target.NodeName) + return err } + + r.Info("script execution succeeded", "output", out, "stage", "PostWait", "script", script, "name", target.UpgradeObject.NamespacedName(), "target", target.NodeName) + return nil } diff --git a/controllers/script_runner_test.go b/controllers/script_runner_test.go index a09f4330..e6e8236b 100644 --- a/controllers/script_runner_test.go +++ b/controllers/script_runner_test.go @@ -1,52 +1,48 @@ package controllers import ( - upgrademgrv1alpha1 "github.com/keikoproj/upgrade-manager/api/v1alpha1" + "testing" + + "github.com/keikoproj/upgrade-manager/api/v1alpha1" "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtimelog "sigs.k8s.io/controller-runtime/pkg/log" - "testing" ) -func TestEchoScript(t *testing.T) { +func TestScriptSuccess(t *testing.T) { g := gomega.NewGomegaWithT(t) - ru := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - r := &ScriptRunner{Log: runtimelog.NullLogger{}} - out, err := r.runScript("echo hello", false, ru) + r := &ScriptRunner{Logger: runtimelog.NullLogger{}} + target := ScriptTarget{ + InstanceID: "instance", + NodeName: "node", + UpgradeObject: &v1alpha1.RollingUpgrade{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "default", + }, + }, + } + out, err := r.runScript("echo hello", target) g.Expect(err).To(gomega.BeNil()) g.Expect(out).To(gomega.Equal("hello\n")) } -func TestEchoScriptWithEnv(t *testing.T) { +func TestScriptFailure(t *testing.T) { g := gomega.NewGomegaWithT(t) - ru := &upgrademgrv1alpha1.RollingUpgrade{ - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{AsgName: "my-asg"}, + r := &ScriptRunner{Logger: runtimelog.NullLogger{}} + target := ScriptTarget{ + InstanceID: "instance", + NodeName: "node", + UpgradeObject: &v1alpha1.RollingUpgrade{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "default", + }, + }, } - r := &ScriptRunner{Log: runtimelog.NullLogger{}} - env := r.buildEnv(ru, "testInstanceID", "testNodeName") - out, err := r.runScriptWithEnv("echo $INSTANCE_ID:$ASG_NAME:$INSTANCE_NAME", false, ru, env) - - g.Expect(err).To(gomega.BeNil()) - g.Expect(out).To(gomega.Equal("testInstanceID:my-asg:testNodeName\n")) -} - -func TestEchoBackgroundScript(t *testing.T) { - g := gomega.NewGomegaWithT(t) - ru := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - r := &ScriptRunner{Log: runtimelog.NullLogger{}} - out, err := r.runScript("echo background", true, ru) - - g.Expect(err).To(gomega.BeNil()) - g.Expect(out).To(gomega.Equal("")) -} - -func TestRunScriptFailure(t *testing.T) { - g := gomega.NewGomegaWithT(t) - ru := &upgrademgrv1alpha1.RollingUpgrade{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}} - r := &ScriptRunner{Log: runtimelog.NullLogger{}} - out, err := r.runScript("echo this will fail; exit 1", false, ru) + out, err := r.runScript("echo this will fail; exit 1", target) g.Expect(err).To(gomega.Not(gomega.BeNil())) g.Expect(out).To(gomega.Not(gomega.Equal(""))) } diff --git a/controllers/suite_test.go b/controllers/suite_test.go deleted file mode 100644 index ff03687a..00000000 --- 
a/controllers/suite_test.go +++ /dev/null @@ -1,79 +0,0 @@ -/* - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "path/filepath" - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - upgrademgrv1alpha1 "github.com/keikoproj/upgrade-manager/api/v1alpha1" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - // +kubebuilder:scaffold:imports -) - -// These tests use Ginkgo (BDD-style Go testing framework). Refer to -// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. - -var cfg *rest.Config -var k8sClient client.Client -var testEnv *envtest.Environment - -func TestAPIs(t *testing.T) { - RegisterFailHandler(Fail) - - RunSpecsWithDefaultAndCustomReporters(t, - "Controller Suite", - []Reporter{envtest.NewlineReporter{}}) -} - -var _ = BeforeSuite(func(done Done) { - logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter))) - - By("bootstrapping test environment") - testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, - } - - var err error - cfg, err = testEnv.Start() - Expect(err).ToNot(HaveOccurred()) - Expect(cfg).ToNot(BeNil()) - - err = upgrademgrv1alpha1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - // +kubebuilder:scaffold:scheme - - k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) - Expect(err).ToNot(HaveOccurred()) - Expect(k8sClient).ToNot(BeNil()) - - close(done) -}, 60) - -var _ = AfterSuite(func() { - By("tearing down the test environment") - err := testEnv.Stop() - Expect(err).ToNot(HaveOccurred()) -}) diff --git a/controllers/uniform_across_az_node_selector.go b/controllers/uniform_across_az_node_selector.go deleted file mode 100644 index bf2640a1..00000000 --- a/controllers/uniform_across_az_node_selector.go +++ /dev/null @@ -1,61 +0,0 @@ -package controllers - -import ( - "github.com/aws/aws-sdk-go/service/autoscaling" - upgrademgrv1alpha1 "github.com/keikoproj/upgrade-manager/api/v1alpha1" - "log" -) - -type azNodesCountState struct { - TotalNodes int - MaxUnavailableNodes int -} - -type UniformAcrossAzNodeSelector struct { - azNodeCounts map[string]*azNodesCountState - ruObj *upgrademgrv1alpha1.RollingUpgrade - asg *autoscaling.Group -} - -func NewUniformAcrossAzNodeSelector(asg *autoscaling.Group, ruObj *upgrademgrv1alpha1.RollingUpgrade) *UniformAcrossAzNodeSelector { - - // find total number of nodes in each AZ - azNodeCounts := make(map[string]*azNodesCountState) - for _, instance := range asg.Instances { - if _, ok := azNodeCounts[*instance.AvailabilityZone]; ok { - azNodeCounts[*instance.AvailabilityZone].TotalNodes += 1 - } else { - azNodeCounts[*instance.AvailabilityZone] = &azNodesCountState{TotalNodes: 1} - } - } - - // find max unavailable for each az - for az, azNodeCount := range azNodeCounts { - 
azNodeCount.MaxUnavailableNodes = getMaxUnavailable(ruObj.Spec.Strategy, azNodeCount.TotalNodes) - log.Printf("Max unavailable calculated for %s, AZ %s is %d", ruObj.Name, az, azNodeCount.MaxUnavailableNodes) - } - - return &UniformAcrossAzNodeSelector{ - azNodeCounts: azNodeCounts, - ruObj: ruObj, - asg: asg, - } -} - -func (selector *UniformAcrossAzNodeSelector) SelectNodesForRestack(state ClusterState) []*autoscaling.Instance { - var instances []*autoscaling.Instance - - // Fetch instances to update from each instance group - for az, processedState := range selector.azNodeCounts { - // Collect the needed number of instances to update - instancesForUpdate := getNextSetOfAvailableInstancesInAz(selector.ruObj.Spec.AsgName, - az, processedState.MaxUnavailableNodes, selector.asg.Instances, state) - if instancesForUpdate == nil { - log.Printf("No instances available for update in AZ: %s for %s", az, selector.ruObj.Name) - } else { - instances = append(instances, instancesForUpdate...) - } - } - - return instances -} diff --git a/controllers/uniform_across_az_node_selector_test.go b/controllers/uniform_across_az_node_selector_test.go deleted file mode 100644 index 6bf6ff1a..00000000 --- a/controllers/uniform_across_az_node_selector_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package controllers - -import ( - "github.com/aws/aws-sdk-go/service/autoscaling" - upgrademgrv1alpha1 "github.com/keikoproj/upgrade-manager/api/v1alpha1" - "github.com/onsi/gomega" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "testing" -) - -func TestUniformAcrossAzNodeSelectorSelectNodes(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - someAsg := "some-asg" - mockID := "some-id" - someLaunchConfig := "some-launch-config" - diffLaunchConfig := "different-launch-config" - az := "az-1" - az2 := "az-2" - az1Instance1 := constructAutoScalingInstance(mockID+"1-"+az, diffLaunchConfig, az) - az2Instance1 := constructAutoScalingInstance(mockID+"1-"+az2, diffLaunchConfig, az2) - az2Instance2 := constructAutoScalingInstance(mockID+"2-"+az2, diffLaunchConfig, az2) - mockAsg := autoscaling.Group{AutoScalingGroupName: &someAsg, - LaunchConfigurationName: &someLaunchConfig, - Instances: []*autoscaling.Instance{ - az1Instance1, - az2Instance1, - az2Instance2, - }, - } - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: someAsg, - Strategy: upgrademgrv1alpha1.UpdateStrategy{ - Type: upgrademgrv1alpha1.UniformAcrossAzUpdateStrategy, - }, - }, - } - - clusterState := NewClusterState() - clusterState.initializeAsg(*mockAsg.AutoScalingGroupName, mockAsg.Instances) - - nodeSelector := NewUniformAcrossAzNodeSelector(&mockAsg, ruObj) - instances := nodeSelector.SelectNodesForRestack(clusterState) - - g.Expect(2).To(gomega.Equal(len(instances))) - - // group instances by AZ - instancesByAz := make(map[string][]*autoscaling.Instance) - for _, instance := range instances { - az := instance.AvailabilityZone - if _, ok := instancesByAz[*az]; !ok { - instancesInAz := make([]*autoscaling.Instance, 0, len(instances)) - instancesByAz[*az] = instancesInAz - } - instancesByAz[*az] = append(instancesByAz[*az], instance) - } - - // assert on number of instances in each az - g.Expect(1).To(gomega.Equal(len(instancesByAz[az]))) - g.Expect(1).To(gomega.Equal(len(instancesByAz[az2]))) -} - -func TestUniformAcrossAzNodeSelectorSelectNodesOneAzComplete(t *testing.T) { - g := gomega.NewGomegaWithT(t) - - someAsg := "some-asg" - mockID := 
"some-id" - someLaunchConfig := "some-launch-config" - diffLaunchConfig := "different-launch-config" - az := "az-1" - az2 := "az-2" - az1Instance1 := constructAutoScalingInstance(mockID+"1-"+az, diffLaunchConfig, az) - az2Instance1 := constructAutoScalingInstance(mockID+"1-"+az2, diffLaunchConfig, az2) - az2Instance2 := constructAutoScalingInstance(mockID+"2-"+az2, diffLaunchConfig, az2) - mockAsg := autoscaling.Group{AutoScalingGroupName: &someAsg, - LaunchConfigurationName: &someLaunchConfig, - Instances: []*autoscaling.Instance{ - az1Instance1, - az2Instance1, - az2Instance2, - }, - } - - ruObj := &upgrademgrv1alpha1.RollingUpgrade{ - ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, - Spec: upgrademgrv1alpha1.RollingUpgradeSpec{ - AsgName: someAsg, - Strategy: upgrademgrv1alpha1.UpdateStrategy{ - Type: upgrademgrv1alpha1.UniformAcrossAzUpdateStrategy, - }, - }, - } - - clusterState := NewClusterState() - clusterState.initializeAsg(*mockAsg.AutoScalingGroupName, mockAsg.Instances) - clusterState.markUpdateInProgress(mockID + "1-" + az) - clusterState.markUpdateInProgress(mockID + "1-" + az2) - clusterState.markUpdateCompleted(mockID + "1-" + az) - clusterState.markUpdateCompleted(mockID + "1-" + az2) - - nodeSelector := NewUniformAcrossAzNodeSelector(&mockAsg, ruObj) - instances := nodeSelector.SelectNodesForRestack(clusterState) - - g.Expect(1).To(gomega.Equal(len(instances))) - g.Expect(&az2).To(gomega.Equal(instances[0].AvailabilityZone)) -} diff --git a/controllers/upgrade.go b/controllers/upgrade.go new file mode 100644 index 00000000..afc3725f --- /dev/null +++ b/controllers/upgrade.go @@ -0,0 +1,664 @@ +/* +Copyright 2021 Intuit Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "fmt" + "math" + "reflect" + "strconv" + "strings" + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/go-logr/logr" + "github.com/keikoproj/upgrade-manager/api/v1alpha1" + "github.com/keikoproj/upgrade-manager/controllers/common" + awsprovider "github.com/keikoproj/upgrade-manager/controllers/providers/aws" + kubeprovider "github.com/keikoproj/upgrade-manager/controllers/providers/kubernetes" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +var ( + //DefaultWaitGroupTimeout is the timeout value for DrainGroup + DefaultWaitGroupTimeout = time.Second * 5 + + //LaunchTemplate latest string + LaunchTemplateVersionLatest = "$Latest" +) + +// DrainManager holds the information to perform drain operation in parallel. 
+type DrainManager struct { + DrainErrors chan error `json:"-"` + DrainGroup *sync.WaitGroup `json:"-"` +} + +type RollingUpgradeContext struct { + logr.Logger + ScriptRunner ScriptRunner + Auth *RollingUpgradeAuthenticator + Cloud *DiscoveredState + RollingUpgrade *v1alpha1.RollingUpgrade + DrainManager *DrainManager + metricsMutex *sync.Mutex + DrainTimeout int + IgnoreDrainFailures bool +} + +func (r *RollingUpgradeContext) RotateNodes() error { + // set status to running + r.RollingUpgrade.SetCurrentStatus(v1alpha1.StatusRunning) + common.SetMetricRollupInitOrRunning(r.RollingUpgrade.Name) + + // set start time + if r.RollingUpgrade.StartTime() == "" { + r.RollingUpgrade.SetStartTime(time.Now().Format(time.RFC3339)) + } + + // discover the state of AWS and K8s cluster. + if err := r.Cloud.Discover(); err != nil { + r.Info("failed to discover the cloud", "scalingGroup", r.RollingUpgrade.ScalingGroupName(), "name", r.RollingUpgrade.NamespacedName()) + r.RollingUpgrade.SetCurrentStatus(v1alpha1.StatusError) + common.SetMetricRollupFailed(r.RollingUpgrade.Name) + return err + } + + var ( + scalingGroup = awsprovider.SelectScalingGroup(r.RollingUpgrade.ScalingGroupName(), r.Cloud.ScalingGroups) + ) + if reflect.DeepEqual(scalingGroup, &autoscaling.Group{}) { + return errors.Errorf("scaling group not found, scalingGroupName: %v", r.RollingUpgrade.ScalingGroupName()) + } + r.Info( + "scaling group details", + "scalingGroup", r.RollingUpgrade.ScalingGroupName(), + "desiredInstances", aws.Int64Value(scalingGroup.DesiredCapacity), + "launchConfig", aws.StringValue(scalingGroup.LaunchConfigurationName), + "name", r.RollingUpgrade.NamespacedName(), + ) + + r.RollingUpgrade.SetTotalNodes(len(scalingGroup.Instances)) + + // check if all instances are rotated. + if !r.IsScalingGroupDrifted() { + r.RollingUpgrade.SetCurrentStatus(v1alpha1.StatusComplete) + common.SetMetricRollupCompleted(r.RollingUpgrade.Name) + r.endTimeUpdate() + return nil + } + + rotationTargets := r.SelectTargets(scalingGroup) + if ok, err := r.ReplaceNodeBatch(rotationTargets); !ok { + return err + } + + return nil +} + +func (r *RollingUpgradeContext) ReplaceNodeBatch(batch []*autoscaling.Instance) (bool, error) { + var ( + mode = r.RollingUpgrade.StrategyMode() + ) + + r.Info("rotating batch", "instances", awsprovider.GetInstanceIDs(batch), "name", r.RollingUpgrade.NamespacedName()) + + //A map to retain the steps for multiple nodes + nodeSteps := make(map[string][]v1alpha1.NodeStepDuration) + + inProcessingNodes := r.RollingUpgrade.Status.NodeInProcessing + if inProcessingNodes == nil { + inProcessingNodes = make(map[string]*v1alpha1.NodeInProcessing) + } + + switch mode { + case v1alpha1.UpdateStrategyModeEager: + for _, target := range batch { + instanceID := aws.StringValue(target.InstanceId) + node := kubeprovider.SelectNodeByInstanceID(instanceID, r.Cloud.ClusterNodes) + if node == nil { + r.Info("node object not found in clusterNodes, skipping this node for now", "instanceID", instanceID, "name", r.RollingUpgrade.NamespacedName()) + continue + } + + var ( + nodeName = node.GetName() + ) + //Add statistics + r.NodeStep(inProcessingNodes, nodeSteps, r.RollingUpgrade.Spec.AsgName, nodeName, v1alpha1.NodeRotationKickoff) + } + + batchInstanceIDs, inServiceInstanceIDs := awsprovider.GetInstanceIDs(batch), awsprovider.GetInServiceInstanceIDs(batch) + // Tag and set to StandBy only the InService instances. 
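+			// Moving the batch to standby lets the scaling group bring up replacement
+			// capacity before the old nodes are drained (eager mode waits on DesiredNodesReady below).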
+ if len(inServiceInstanceIDs) > 0 { + // Add in-progress tag + r.Info("setting instances to in-progress", "batch", batchInstanceIDs, "instances(InService)", inServiceInstanceIDs, "name", r.RollingUpgrade.NamespacedName()) + if err := r.Auth.TagEC2instances(inServiceInstanceIDs, instanceStateTagKey, inProgressTagValue); err != nil { + r.Error(err, "failed to set instances to in-progress", "batch", batchInstanceIDs, "instances(InService)", inServiceInstanceIDs, "name", r.RollingUpgrade.NamespacedName()) + r.UpdateMetricsStatus(inProcessingNodes, nodeSteps) + return false, err + } + // Standby + r.Info("setting instances to stand-by", "batch", batchInstanceIDs, "instances(InService)", inServiceInstanceIDs, "name", r.RollingUpgrade.NamespacedName()) + if err := r.SetBatchStandBy(batchInstanceIDs); err != nil { + r.Info("failed to set instances to stand-by", "instances", batch, "message", err.Error(), "name", r.RollingUpgrade.NamespacedName()) + } + + // requeue until there are no InService instances in the batch + r.UpdateMetricsStatus(inProcessingNodes, nodeSteps) + return true, nil + } else { + r.Info("no InService instances in the batch", "batch", batchInstanceIDs, "instances(InService)", inServiceInstanceIDs, "name", r.RollingUpgrade.NamespacedName()) + } + + // turns onto desired nodes + for _, target := range batch { + instanceID := aws.StringValue(target.InstanceId) + node := kubeprovider.SelectNodeByInstanceID(instanceID, r.Cloud.ClusterNodes) + if node == nil { + r.Info("node object not found in clusterNodes, skipping this node for now", "instanceID", instanceID, "name", r.RollingUpgrade.NamespacedName()) + continue + } + var ( + nodeName = node.GetName() + ) + r.NodeStep(inProcessingNodes, nodeSteps, r.RollingUpgrade.Spec.AsgName, nodeName, v1alpha1.NodeRotationDesiredNodeReady) + } + + // Wait for desired nodes + r.Info("waiting for desired nodes", "name", r.RollingUpgrade.NamespacedName()) + if !r.DesiredNodesReady() { + r.UpdateMetricsStatus(inProcessingNodes, nodeSteps) + return true, nil + } + r.Info("desired nodes are ready", "name", r.RollingUpgrade.NamespacedName()) + + case v1alpha1.UpdateStrategyModeLazy: + for _, target := range batch { + instanceID := aws.StringValue(target.InstanceId) + node := kubeprovider.SelectNodeByInstanceID(instanceID, r.Cloud.ClusterNodes) + if node == nil { + r.Info("node object not found in clusterNodes, skipping this node for now", "instanceID", instanceID, "name", r.RollingUpgrade.NamespacedName()) + continue + } + var ( + nodeName = node.GetName() + ) + // add statistics + r.NodeStep(inProcessingNodes, nodeSteps, r.RollingUpgrade.Spec.AsgName, nodeName, v1alpha1.NodeRotationKickoff) + } + // add in-progress tag + batchInstanceIDs, inServiceInstanceIDs := awsprovider.GetInstanceIDs(batch), awsprovider.GetInServiceInstanceIDs(batch) + r.Info("setting batch to in-progress", "batch", batchInstanceIDs, "instances(InService)", inServiceInstanceIDs, "name", r.RollingUpgrade.NamespacedName()) + if err := r.Auth.TagEC2instances(inServiceInstanceIDs, instanceStateTagKey, inProgressTagValue); err != nil { + r.Error(err, "failed to set batch in-progress", "batch", batchInstanceIDs, "instances(InService)", inServiceInstanceIDs, "name", r.RollingUpgrade.NamespacedName()) + r.UpdateMetricsStatus(inProcessingNodes, nodeSteps) + return false, err + } + } + + var ( + lastTerminationTime = r.RollingUpgrade.LastNodeTerminationTime() + nodeInterval = r.RollingUpgrade.NodeIntervalSeconds() + lastDrainTime = r.RollingUpgrade.LastNodeDrainTime() + drainInterval = 
r.RollingUpgrade.PostDrainDelaySeconds() + ) + + // check if we are still waiting on a termination delay + if lastTerminationTime != nil && !lastTerminationTime.IsZero() && time.Since(lastTerminationTime.Time).Seconds() < float64(nodeInterval) { + r.Info("reconcile requeue due to termination interval wait", "name", r.RollingUpgrade.NamespacedName()) + return true, nil + } + // check if we are still waiting on a drain delay + if lastDrainTime != nil && !lastDrainTime.IsZero() && time.Since(lastDrainTime.Time).Seconds() < float64(drainInterval) { + r.Info("reconcile requeue due to drain interval wait", "name", r.RollingUpgrade.NamespacedName()) + return true, nil + } + + if reflect.DeepEqual(r.DrainManager.DrainGroup, &sync.WaitGroup{}) { + for _, target := range batch { + instanceID := aws.StringValue(target.InstanceId) + node := kubeprovider.SelectNodeByInstanceID(instanceID, r.Cloud.ClusterNodes) + if node == nil { + r.Info("node object not found in clusterNodes, skipping this node for now", "instanceID", instanceID, "name", r.RollingUpgrade.NamespacedName()) + continue + } + var ( + nodeName = node.GetName() + scriptTarget = ScriptTarget{ + InstanceID: instanceID, + NodeName: nodeName, + UpgradeObject: r.RollingUpgrade, + } + ) + r.DrainManager.DrainGroup.Add(1) + + // Determine IgnoreDrainFailure and DrainTimeout values. CR spec takes the precedence. + var ( + drainTimeout int + ignoreDrainFailures bool + ) + if r.RollingUpgrade.DrainTimeout() == nil { + drainTimeout = r.DrainTimeout + } else { + drainTimeout = *r.RollingUpgrade.DrainTimeout() + } + + if r.RollingUpgrade.IsIgnoreDrainFailures() == nil { + ignoreDrainFailures = r.IgnoreDrainFailures + } else { + ignoreDrainFailures = *r.RollingUpgrade.IsIgnoreDrainFailures() + } + + // Drain the nodes in parallel + go func() { + defer r.DrainManager.DrainGroup.Done() + + // Turns onto PreDrain script + r.NodeStep(inProcessingNodes, nodeSteps, r.RollingUpgrade.Spec.AsgName, nodeName, v1alpha1.NodeRotationPredrainScript) + + // Predrain script + if err := r.ScriptRunner.PreDrain(scriptTarget); err != nil { + r.DrainManager.DrainErrors <- errors.Errorf("PreDrain failed: instanceID - %v, %v", instanceID, err.Error()) + } + + // Issue drain concurrently - set lastDrainTime + if node := kubeprovider.SelectNodeByInstanceID(instanceID, r.Cloud.ClusterNodes); node != nil { + r.Info("draining the node", "instance", instanceID, "node name", node.Name, "name", r.RollingUpgrade.NamespacedName()) + + // Turns onto NodeRotationDrain + r.NodeStep(inProcessingNodes, nodeSteps, r.RollingUpgrade.Spec.AsgName, nodeName, v1alpha1.NodeRotationDrain) + + if err := r.Auth.DrainNode(node, time.Duration(r.RollingUpgrade.PostDrainDelaySeconds()), drainTimeout, r.Auth.Kubernetes); err != nil { + // ignore drain failures if either of spec or controller args have set ignoreDrainFailures to true. 
+ if !ignoreDrainFailures { + r.DrainManager.DrainErrors <- errors.Errorf("DrainNode failed: instanceID - %v, %v", instanceID, err.Error()) + return + } + } + } + + // Turns onto NodeRotationPostdrainScript + r.NodeStep(inProcessingNodes, nodeSteps, r.RollingUpgrade.Spec.AsgName, nodeName, v1alpha1.NodeRotationPostdrainScript) + + // post drain script + if err := r.ScriptRunner.PostDrain(scriptTarget); err != nil { + r.DrainManager.DrainErrors <- errors.Errorf("PostDrain failed: instanceID - %v, %v", instanceID, err.Error()) + } + + // Turns onto NodeRotationPostWait + r.NodeStep(inProcessingNodes, nodeSteps, r.RollingUpgrade.Spec.AsgName, nodeName, v1alpha1.NodeRotationPostWait) + + // Post Wait Script + if err := r.ScriptRunner.PostWait(scriptTarget); err != nil { + r.DrainManager.DrainErrors <- errors.Errorf("PostWait failed: instanceID - %v, %v", instanceID, err.Error()) + } + }() + } + } + + done := make(chan struct{}) + go func() { + defer close(done) + r.DrainManager.DrainGroup.Wait() + }() + + select { + case err := <-r.DrainManager.DrainErrors: + r.UpdateMetricsStatus(inProcessingNodes, nodeSteps) + + r.Error(err, "failed to rotate the node", "name", r.RollingUpgrade.NamespacedName()) + return false, err + + case <-done: + // goroutines completed, terminate and requeue + r.RollingUpgrade.SetLastNodeDrainTime(&metav1.Time{Time: time.Now()}) + r.Info("instances drained successfully, terminating", "name", r.RollingUpgrade.NamespacedName()) + for _, target := range batch { + instanceID := aws.StringValue(target.InstanceId) + node := kubeprovider.SelectNodeByInstanceID(instanceID, r.Cloud.ClusterNodes) + if node == nil { + r.Info("node object not found in clusterNodes, skipping this node for now", "instanceID", instanceID, "name", r.RollingUpgrade.NamespacedName()) + continue + } + var ( + nodeName = node.GetName() + scriptTarget = ScriptTarget{ + InstanceID: instanceID, + NodeName: nodeName, + UpgradeObject: r.RollingUpgrade, + } + ) + + // Turns onto NodeRotationTerminate + r.NodeStep(inProcessingNodes, nodeSteps, r.RollingUpgrade.Spec.AsgName, nodeName, v1alpha1.NodeRotationTerminate) + + // Terminate - set lastTerminateTime + r.Info("terminating instance", "instance", instanceID, "name", r.RollingUpgrade.NamespacedName()) + + if err := r.Auth.TerminateInstance(target); err != nil { + // terminate failures are retryable + r.Info("failed to terminate instance", "instance", instanceID, "message", err.Error(), "name", r.RollingUpgrade.NamespacedName()) + r.UpdateMetricsStatus(inProcessingNodes, nodeSteps) + return true, nil + } + + r.RollingUpgrade.SetLastNodeTerminationTime(&metav1.Time{Time: time.Now()}) + + // Turns onto NodeRotationTerminate + r.NodeStep(inProcessingNodes, nodeSteps, r.RollingUpgrade.Spec.AsgName, nodeName, v1alpha1.NodeRotationPostTerminate) + + // Post Terminate Script + if err := r.ScriptRunner.PostTerminate(scriptTarget); err != nil { + return false, err + } + + //Calculate the terminating time, + terminatedTime := metav1.Time{ + Time: metav1.Now().Add(time.Duration(r.RollingUpgrade.NodeIntervalSeconds()) * time.Second), + } + r.NodeStep(inProcessingNodes, nodeSteps, r.RollingUpgrade.Spec.AsgName, nodeName, v1alpha1.NodeRotationTerminated) + r.DoNodeStep(inProcessingNodes, nodeSteps, r.RollingUpgrade.Spec.AsgName, nodeName, v1alpha1.NodeRotationCompleted, terminatedTime) + + } + r.UpdateMetricsStatus(inProcessingNodes, nodeSteps) + + case <-time.After(DefaultWaitGroupTimeout): + // goroutines timed out - requeue + + r.UpdateMetricsStatus(inProcessingNodes, 
nodeSteps) + + r.Info("instances are still draining", "name", r.RollingUpgrade.NamespacedName()) + return true, nil + } + return true, nil +} + +func (r *RollingUpgradeContext) SelectTargets(scalingGroup *autoscaling.Group) []*autoscaling.Instance { + var ( + batchSize = r.RollingUpgrade.MaxUnavailable() + totalNodes = len(scalingGroup.Instances) + targets = make([]*autoscaling.Instance, 0) + ) + unavailableInt := CalculateMaxUnavailable(batchSize, totalNodes) + + // first process all in progress instances + r.Info("selecting batch for rotation", "batch size", unavailableInt, "name", r.RollingUpgrade.NamespacedName()) + for _, instance := range r.Cloud.InProgressInstances { + if selectedInstance := awsprovider.SelectScalingGroupInstance(instance, scalingGroup); !reflect.DeepEqual(selectedInstance, &autoscaling.Instance{}) { + //In-progress instances shouldn't be considered if they are in terminating state. + if !common.ContainsEqualFold(awsprovider.TerminatingInstanceStates, aws.StringValue(selectedInstance.LifecycleState)) { + targets = append(targets, selectedInstance) + } + } + } + + if len(targets) > 0 { + r.Info("found in-progress instances", "instances", awsprovider.GetInstanceIDs(targets)) + } + + // select via strategy if there are no in-progress instances + if r.RollingUpgrade.UpdateStrategyType() == v1alpha1.RandomUpdateStrategy { + for _, instance := range scalingGroup.Instances { + if r.IsInstanceDrifted(instance) && !common.ContainsEqualFold(awsprovider.GetInstanceIDs(targets), aws.StringValue(instance.InstanceId)) { + targets = append(targets, instance) + } + } + if unavailableInt > len(targets) { + unavailableInt = len(targets) + } + return targets[:unavailableInt] + + } else if r.RollingUpgrade.UpdateStrategyType() == v1alpha1.UniformAcrossAzUpdateStrategy { + for _, instance := range scalingGroup.Instances { + if r.IsInstanceDrifted(instance) && !common.ContainsEqualFold(awsprovider.GetInstanceIDs(targets), aws.StringValue(instance.InstanceId)) { + targets = append(targets, instance) + } + } + + var AZtargets = make([]*autoscaling.Instance, 0) + AZs := awsprovider.GetScalingAZs(targets) + if len(AZs) == 0 { + return AZtargets + } + for _, target := range targets { + AZ := aws.StringValue(target.AvailabilityZone) + if strings.EqualFold(AZ, AZs[0]) { + AZtargets = append(AZtargets, target) + } + } + if unavailableInt > len(AZtargets) { + unavailableInt = len(AZtargets) + } + return AZtargets[:unavailableInt] + } + return targets +} + +func (r *RollingUpgradeContext) IsInstanceDrifted(instance *autoscaling.Instance) bool { + + var ( + scalingGroupName = r.RollingUpgrade.ScalingGroupName() + scalingGroup = awsprovider.SelectScalingGroup(scalingGroupName, r.Cloud.ScalingGroups) + instanceID = aws.StringValue(instance.InstanceId) + ) + + // if an instance is in terminating state, ignore. 
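+	// (a terminating instance is already being replaced by the scaling group, so it is not counted as drifted)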
+	if common.ContainsEqualFold(awsprovider.TerminatingInstanceStates, aws.StringValue(instance.LifecycleState)) {
+		return false
+	}
+
+	// check if the instance meets the force-refresh criteria
+	if r.RollingUpgrade.IsForceRefresh() {
+		node := kubeprovider.SelectNodeByInstanceID(instanceID, r.Cloud.ClusterNodes)
+		if node == nil {
+			r.Info("node object not found in clusterNodes, skipping this node for now", "instanceID", instanceID, "name", r.RollingUpgrade.NamespacedName())
+			return false
+		}
+		var (
+			nodeCreationTime    = node.CreationTimestamp.Time
+			upgradeCreationTime = r.RollingUpgrade.CreationTimestamp.Time
+		)
+		if nodeCreationTime.Before(upgradeCreationTime) {
+			r.Info("rolling upgrade configured for forced refresh", "instance", instanceID, "name", r.RollingUpgrade.NamespacedName())
+			return true
+		}
+	}
+
+	if scalingGroup.LaunchConfigurationName != nil {
+		if instance.LaunchConfigurationName == nil {
+			return true
+		}
+		launchConfigName := aws.StringValue(scalingGroup.LaunchConfigurationName)
+		instanceConfigName := aws.StringValue(instance.LaunchConfigurationName)
+		if !strings.EqualFold(launchConfigName, instanceConfigName) {
+			return true
+		}
+	} else if scalingGroup.LaunchTemplate != nil {
+		if instance.LaunchTemplate == nil {
+			return true
+		}
+
+		var (
+			launchTemplateName      = aws.StringValue(scalingGroup.LaunchTemplate.LaunchTemplateName)
+			instanceTemplateName    = aws.StringValue(instance.LaunchTemplate.LaunchTemplateName)
+			instanceTemplateVersion = aws.StringValue(instance.LaunchTemplate.Version)
+			templateVersion         = aws.StringValue(scalingGroup.LaunchTemplate.Version)
+		)
+
+		// replace latest string with latest version number
+		if strings.EqualFold(templateVersion, LaunchTemplateVersionLatest) {
+			templateVersion = awsprovider.GetTemplateLatestVersion(r.Cloud.LaunchTemplates, launchTemplateName)
+		}
+
+		if !strings.EqualFold(launchTemplateName, instanceTemplateName) {
+			return true
+		} else if !strings.EqualFold(instanceTemplateVersion, templateVersion) {
+			return true
+		}
+
+	} else if scalingGroup.MixedInstancesPolicy != nil {
+		if instance.LaunchTemplate == nil {
+			return true
+		}
+
+		var (
+			launchTemplateName      = aws.StringValue(scalingGroup.MixedInstancesPolicy.LaunchTemplate.LaunchTemplateSpecification.LaunchTemplateName)
+			instanceTemplateName    = aws.StringValue(instance.LaunchTemplate.LaunchTemplateName)
+			instanceTemplateVersion = aws.StringValue(instance.LaunchTemplate.Version)
+			templateVersion         = aws.StringValue(scalingGroup.MixedInstancesPolicy.LaunchTemplate.LaunchTemplateSpecification.Version)
+		)
+
+		// replace latest string with latest version number
+		if strings.EqualFold(templateVersion, LaunchTemplateVersionLatest) {
+			templateVersion = awsprovider.GetTemplateLatestVersion(r.Cloud.LaunchTemplates, launchTemplateName)
+		}
+
+		if !strings.EqualFold(launchTemplateName, instanceTemplateName) {
+			return true
+		} else if !strings.EqualFold(instanceTemplateVersion, templateVersion) {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (r *RollingUpgradeContext) IsScalingGroupDrifted() bool {
+	var (
+		driftCount      = 0
+		scalingGroup    = awsprovider.SelectScalingGroup(r.RollingUpgrade.ScalingGroupName(), r.Cloud.ScalingGroups)
+		desiredCapacity = int(aws.Int64Value(scalingGroup.DesiredCapacity))
+	)
+	r.Info("checking if rolling upgrade is completed", "name", r.RollingUpgrade.NamespacedName())
+
+	for _, instance := range scalingGroup.Instances {
+		if r.IsInstanceDrifted(instance) {
+			driftCount++
+		}
+	}
+	if driftCount != 0 {
+		r.Info("drift detected in scaling group", "driftedInstancesCount/DesiredInstancesCount", fmt.Sprintf("(%v/%v)", driftCount, desiredCapacity), "name", r.RollingUpgrade.NamespacedName())
+		r.SetProgress(desiredCapacity-driftCount, desiredCapacity)
+		return true
+	}
+	r.SetProgress(desiredCapacity, desiredCapacity)
+	r.Info("no drift in scaling group", "name", r.RollingUpgrade.NamespacedName())
+	return false
+}
+
+func (r *RollingUpgradeContext) DesiredNodesReady() bool {
+	var (
+		scalingGroup     = awsprovider.SelectScalingGroup(r.RollingUpgrade.ScalingGroupName(), r.Cloud.ScalingGroups)
+		desiredInstances = aws.Int64Value(scalingGroup.DesiredCapacity)
+		readyNodes       = 0
+	)
+
+	// wait for desired instances
+	inServiceInstanceIDs := awsprovider.GetInServiceInstanceIDs(scalingGroup.Instances)
+	if len(inServiceInstanceIDs) != int(desiredInstances) {
+		r.Info("desired number of instances are not InService", "desired", int(desiredInstances), "inServiceCount", len(inServiceInstanceIDs), "name", r.RollingUpgrade.NamespacedName())
+		return false
+	}
+
+	// wait for desired nodes
+	if r.Cloud.ClusterNodes != nil && !reflect.DeepEqual(r.Cloud.ClusterNodes, &corev1.NodeList{}) {
+		for _, node := range r.Cloud.ClusterNodes {
+			instanceID := kubeprovider.GetNodeInstanceID(node)
+			if common.ContainsEqualFold(inServiceInstanceIDs, instanceID) && kubeprovider.IsNodeReady(node) && kubeprovider.IsNodePassesReadinessGates(node, r.RollingUpgrade.Spec.ReadinessGates) {
+				readyNodes++
+			}
+		}
+	}
+	if readyNodes != int(desiredInstances) {
+		r.Info("desired number of nodes are not ready", "desired", int(desiredInstances), "readyNodesCount", readyNodes, "name", r.RollingUpgrade.NamespacedName())
+		return false
+	}
+
+	return true
+}
+
+func CalculateMaxUnavailable(batchSize intstr.IntOrString, totalNodes int) int {
+	var unavailableInt int
+	if batchSize.Type == intstr.String {
+		if strings.Contains(batchSize.StrVal, "%") {
+			unavailableInt, _ = intstr.GetValueFromIntOrPercent(&batchSize, totalNodes, true)
+		} else {
+			unavailableInt, _ = strconv.Atoi(batchSize.StrVal)
+		}
+	} else {
+		unavailableInt = batchSize.IntValue()
+	}
+
+	// batch size should be at least 1
+	if unavailableInt == 0 {
+		unavailableInt = 1
+	}
+
+	// batch size should be at most the number of nodes
+	if unavailableInt > totalNodes {
+		unavailableInt = totalNodes
+	}
+
+	return unavailableInt
+}
+
+func (r *RollingUpgradeContext) SetProgress(nodesProcessed int, totalNodes int) {
+	completePercentage := int(math.Round(float64(nodesProcessed) / float64(totalNodes) * 100))
+	r.RollingUpgrade.SetTotalNodes(totalNodes)
+	r.RollingUpgrade.SetNodesProcessed(nodesProcessed)
+	r.RollingUpgrade.SetCompletePercentage(completePercentage)
+
+	// expose total nodes and nodes processed to prometheus
+	common.SetTotalNodesMetric(r.RollingUpgrade.ScalingGroupName(), totalNodes)
+	common.SetNodesProcessedMetric(r.RollingUpgrade.ScalingGroupName(), nodesProcessed)
+
+}
+
+func (r *RollingUpgradeContext) endTimeUpdate() {
+	// set end time
+	r.RollingUpgrade.SetEndTime(time.Now().Format(time.RFC3339))
+
+	// set total processing time
+	startTime, err1 := time.Parse(time.RFC3339, r.RollingUpgrade.StartTime())
+	endTime, err2 := time.Parse(time.RFC3339, r.RollingUpgrade.EndTime())
+	if err1 != nil || err2 != nil {
+		r.Info("failed to calculate totalProcessingTime")
+	} else {
+		var totalProcessingTime = endTime.Sub(startTime)
+		r.RollingUpgrade.SetTotalProcessingTime(totalProcessingTime.String())
+
+		// expose total processing time to prometheus
+		
common.TotalProcessingTime(r.RollingUpgrade.ScalingGroupName(), totalProcessingTime) + } +} + +// AWS API call for setting an instance to StandBy has a limit of 19. Hence we have to call the API in batches. +func (r *RollingUpgradeContext) SetBatchStandBy(instanceIDs []string) error { + var err error + instanceBatch := common.GetChunks(instanceIDs, awsprovider.InstanceStandByLimit) + for _, batch := range instanceBatch { + if err = r.Auth.SetInstancesStandBy(batch, r.RollingUpgrade.Spec.AsgName); err != nil { + return err + } + } + return nil +} diff --git a/controllers/upgrade_test.go b/controllers/upgrade_test.go new file mode 100644 index 00000000..a74665d8 --- /dev/null +++ b/controllers/upgrade_test.go @@ -0,0 +1,519 @@ +package controllers + +import ( + "os" + "testing" + + "k8s.io/apimachinery/pkg/util/intstr" + drain "k8s.io/kubectl/pkg/drain" + + "time" + + awsprovider "github.com/keikoproj/upgrade-manager/controllers/providers/aws" + corev1 "k8s.io/api/core/v1" + + //AWS + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/keikoproj/upgrade-manager/api/v1alpha1" +) + +// This test checks implementation of our DrainNode which does both cordon + drain +func TestDrainNode(t *testing.T) { + var tests = []struct { + TestDescription string + Reconciler *RollingUpgradeReconciler + Node *corev1.Node + ExpectError bool + }{ + { + "Drain should succeed as node is registered with fakeClient", + createRollingUpgradeReconciler(t), + createNode("mock-node-1"), + false, + }, + { + "Drain should fail as node is not registered with fakeClient", + createRollingUpgradeReconciler(t), + &corev1.Node{}, + true, + }, + } + + for _, test := range tests { + rollupCtx := createRollingUpgradeContext(test.Reconciler) + err := rollupCtx.Auth.DrainNode( + test.Node, + time.Duration(rollupCtx.RollingUpgrade.PostDrainDelaySeconds()), + 900, + rollupCtx.Auth.Kubernetes, + ) + if (test.ExpectError && err == nil) || (!test.ExpectError && err != nil) { + t.Errorf("Test Description: %s \n expected error(bool): %v, Actual err: %v", test.TestDescription, test.ExpectError, err) + } + } + +} + +// This test checks implementation of the package provided Cordon/Uncordon function +func TestRunCordonOrUncordon(t *testing.T) { + var tests = []struct { + TestDescription string + Reconciler *RollingUpgradeReconciler + Node *corev1.Node + Cordon bool + ExpectError bool + }{ + { + "Cordon should succeed as node is registered with fakeClient", + createRollingUpgradeReconciler(t), + createNode("mock-node-1"), + true, + false, + }, + { + "Cordon should fail as node is not registered with fakeClient", + createRollingUpgradeReconciler(t), + &corev1.Node{}, + true, + true, + }, + { + "Uncordon should succeed as node is registered with fakeClient", + createRollingUpgradeReconciler(t), + func() *corev1.Node { + node := createNode("mock-node-1") + node.Spec.Unschedulable = true + return node + }(), + false, + false, + }, + { + "Uncordon should fail as node is not registered with fakeClient", + createRollingUpgradeReconciler(t), + func() *corev1.Node { + node := &corev1.Node{} + node.Spec.Unschedulable = true + return node + }(), + false, + true, + }, + } + + for _, test := range tests { + rollupCtx := createRollingUpgradeContext(test.Reconciler) + helper := &drain.Helper{ + Client: rollupCtx.Auth.Kubernetes, + Force: true, + GracePeriodSeconds: -1, + IgnoreAllDaemonSets: true, + Out: os.Stdout, + ErrOut: os.Stdout, + DeleteEmptyDirData: true, + Timeout: 900, + } + err := 
drain.RunCordonOrUncordon(helper, test.Node, test.Cordon) + if (test.ExpectError && err == nil) || (!test.ExpectError && err != nil) { + t.Errorf("Test Description: %s \n expected error(bool): %v, Actual err: %v", test.TestDescription, test.ExpectError, err) + } + //check if the node is actually cordoned/uncordoned. + if test.Cordon && test.Node != nil && !test.Node.Spec.Unschedulable { + t.Errorf("Test Description: %s \n expected the node to be cordoned but it is uncordoned", test.TestDescription) + } + if !test.Cordon && test.Node != nil && test.Node.Spec.Unschedulable { + t.Errorf("Test Description: %s \n expected the node to be uncordoned but it is cordoned", test.TestDescription) + } + + } + +} + +// This test checks implementation of the package provided Drain function +func TestRunDrainNode(t *testing.T) { + var tests = []struct { + TestDescription string + Reconciler *RollingUpgradeReconciler + Node *corev1.Node + ExpectError bool + }{ + { + "Drain should succeed as node is registered with fakeClient", + createRollingUpgradeReconciler(t), + createNode("mock-node-1"), + false, + }, + // This test should fail, create an upstream ticket. + // https://github.com/kubernetes/kubectl/blob/d5b32e7f3c0260abb5b1cd5a62d4eb1de287bc93/pkg/drain/default.go#L33 + // { + // "Drain should fail as node is not registered with fakeClient", + // createRollingUpgradeReconciler(t), + // &corev1.Node{}, + // true, + // }, + } + for _, test := range tests { + rollupCtx := createRollingUpgradeContext(test.Reconciler) + helper := &drain.Helper{ + Client: rollupCtx.Auth.Kubernetes, + Force: true, + GracePeriodSeconds: -1, + IgnoreAllDaemonSets: true, + Out: os.Stdout, + ErrOut: os.Stdout, + DeleteEmptyDirData: true, + Timeout: 900, + } + err := drain.RunNodeDrain(helper, test.Node.Name) + if (test.ExpectError && err == nil) || (!test.ExpectError && err != nil) { + t.Errorf("Test Description: %s \n expected error(bool): %v, Actual err: %v", test.TestDescription, test.ExpectError, err) + } + } + +} + +func TestIsInstanceDrifted(t *testing.T) { + var tests = []struct { + TestDescription string + Reconciler *RollingUpgradeReconciler + Instance *autoscaling.Instance + AsgName *string + ExpectedValue bool + }{ + { + "Instance has the same launch config as the ASG, expect false from IsInstanceDrifted", + createRollingUpgradeReconciler(t), + createASGInstance("mock-instance-1", "mock-launch-config-1"), + aws.String("mock-asg-1"), + false, + }, + { + "Instance has different launch config from the ASG, expect true from IsInstanceDrifted", + createRollingUpgradeReconciler(t), + createASGInstance("mock-instance-1", "different-launch-config"), + aws.String("mock-asg-1"), + true, + }, + { + "Instance has no launch config, expect true from IsInstanceDrifted", + createRollingUpgradeReconciler(t), + createASGInstance("mock-instance-1", ""), + aws.String("mock-asg-1"), + true, + }, + { + "Instance has launch template, expect true from IsInstanceDrifted", + createRollingUpgradeReconciler(t), + createASGInstanceWithLaunchTemplate("mock-instance-1", "mock-launch-template-4"), + aws.String("mock-asg-4"), + true, + }, + { + "Instance has mixed instances launch template, expect true from IsInstanceDrifted", + createRollingUpgradeReconciler(t), + createASGInstanceWithLaunchTemplate("mock-instance-1", "mock-launch-template-5"), + aws.String("mock-asg-5"), + true, + }, + } + for _, test := range tests { + rollupCtx := createRollingUpgradeContext(test.Reconciler) + rollupCtx.Cloud.ScalingGroups = createASGs() + 
rollupCtx.RollingUpgrade.Spec.AsgName = *test.AsgName + actualValue := rollupCtx.IsInstanceDrifted(test.Instance) + if actualValue != test.ExpectedValue { + t.Errorf("Test Description: %s \n expected value: %v, actual value: %v", test.TestDescription, test.ExpectedValue, actualValue) + } + } +} + +func TestIsScalingGroupDrifted(t *testing.T) { + var tests = []struct { + TestDescription string + Reconciler *RollingUpgradeReconciler + AsgClient *MockAutoscalingGroup + ExpectedValue bool + }{ + { + "All instances have the same launch config as the ASG, expect false from IsScalingGroupDrifted", + createRollingUpgradeReconciler(t), + createASGClient(), + false, + }, + { + "All instances have different launch config as the ASG, expect true from IsScalingGroupDrifted", + createRollingUpgradeReconciler(t), + func() *MockAutoscalingGroup { + newAsgClient := createASGClient() + newAsgClient.autoScalingGroups[0].LaunchConfigurationName = aws.String("different-launch-config") + return newAsgClient + }(), + true, + }, + } + for _, test := range tests { + rollupCtx := createRollingUpgradeContext(test.Reconciler) + rollupCtx.Cloud.ScalingGroups = test.AsgClient.autoScalingGroups + rollupCtx.Auth.AmazonClientSet.AsgClient = test.AsgClient + + actualValue := rollupCtx.IsScalingGroupDrifted() + if actualValue != test.ExpectedValue { + t.Errorf("Test Description: %s \n expected value: %v, actual value: %v", test.TestDescription, test.ExpectedValue, actualValue) + } + } + +} + +func TestRotateNodes(t *testing.T) { + var tests = []struct { + TestDescription string + Reconciler *RollingUpgradeReconciler + AsgClient *MockAutoscalingGroup + RollingUpgradeContext *RollingUpgradeContext + ExpectedValue bool + ExpectedStatusValue string + }{ + { + "All instances have different launch config as the ASG, RotateNodes() should not mark CR complete", + createRollingUpgradeReconciler(t), + createASGClient(), + func() *RollingUpgradeContext { + newRollingUpgradeContext := createRollingUpgradeContext(createRollingUpgradeReconciler(t)) + newRollingUpgradeContext.RollingUpgrade.Spec.AsgName = "mock-asg-2" // The instances in mock-asg are drifted + return newRollingUpgradeContext + }(), + true, + v1alpha1.StatusRunning, + }, + { + "All instances have same launch config as the ASG, RotateNodes() should mark CR complete", + createRollingUpgradeReconciler(t), + createASGClient(), + createRollingUpgradeContext(createRollingUpgradeReconciler(t)), + false, + v1alpha1.StatusComplete, + }, + } + for _, test := range tests { + rollupCtx := test.RollingUpgradeContext + rollupCtx.Cloud.ScalingGroups = test.AsgClient.autoScalingGroups + rollupCtx.Auth.AmazonClientSet.AsgClient = test.AsgClient + + err := rollupCtx.RotateNodes() + if err != nil { + t.Errorf("Test Description: %s \n error: %v", test.TestDescription, err) + } + if rollupCtx.RollingUpgrade.CurrentStatus() != test.ExpectedStatusValue { + t.Errorf("Test Description: %s \n expected value: %s, actual value: %s", test.TestDescription, test.ExpectedStatusValue, rollupCtx.RollingUpgrade.CurrentStatus()) + } + } + +} + +func TestDesiredNodesReady(t *testing.T) { + var tests = []struct { + TestDescription string + Reconciler *RollingUpgradeReconciler + AsgClient *MockAutoscalingGroup + ClusterNodes []*corev1.Node + ExpectedValue bool + }{ + { + "Desired nodes are ready", + createRollingUpgradeReconciler(t), + createASGClient(), + createNodeSlice(), + true, + }, + { + "Desired instances are not ready (desiredCount != inServiceCount)", + createRollingUpgradeReconciler(t), + func() 
*MockAutoscalingGroup { + newAsgClient := createASGClient() + newAsgClient.autoScalingGroups[0].DesiredCapacity = func(x int) *int64 { i := int64(x); return &i }(4) + return newAsgClient + }(), + createNodeSlice(), + false, + }, + { + "None of the nodes are ready (desiredCount != readyCount)", + createRollingUpgradeReconciler(t), + createASGClient(), + func() []*corev1.Node { + var nodeSlice []*corev1.Node + for i := 0; i < 3; i++ { + node := createNode("mock-node-1") + node.Status.Conditions = []corev1.NodeCondition{ + {Type: corev1.NodeReady, Status: corev1.ConditionFalse}, + } + nodeSlice = append(nodeSlice, node) + } + return nodeSlice + }(), + false, + }, + { + "None of the instances are InService (desiredCount != inServiceCount)", + createRollingUpgradeReconciler(t), + func() *MockAutoscalingGroup { + newAsgClient := createASGClient() + newAsgClient.autoScalingGroups[0].Instances = []*autoscaling.Instance{ + &autoscaling.Instance{InstanceId: aws.String("mock-instance-1"), LifecycleState: aws.String("Pending")}, + &autoscaling.Instance{InstanceId: aws.String("mock-instance-2"), LifecycleState: aws.String("Terminating")}, + &autoscaling.Instance{InstanceId: aws.String("mock-instance-3"), LifecycleState: aws.String("Terminating")}, + } + return newAsgClient + }(), + createNodeSlice(), + false, + }, + } + for _, test := range tests { + rollupCtx := createRollingUpgradeContext(test.Reconciler) + rollupCtx.Cloud.ScalingGroups = test.AsgClient.autoScalingGroups + rollupCtx.Cloud.ClusterNodes = test.ClusterNodes + rollupCtx.Auth.AmazonClientSet.AsgClient = test.AsgClient + + actualValue := rollupCtx.DesiredNodesReady() + if actualValue != test.ExpectedValue { + t.Errorf("Test Description: %s \n expected value: %v, actual value: %v", test.TestDescription, test.ExpectedValue, actualValue) + } + } +} + +func TestSetBatchStandBy(t *testing.T) { + var tests = []struct { + TestDescription string + Reconciler *RollingUpgradeReconciler + RollingUpgrade *v1alpha1.RollingUpgrade + AsgClient *MockAutoscalingGroup + ClusterNodes []*corev1.Node + ExpectedValue error + InstanceStandByLimit int + }{ + { + "Single Batch", + createRollingUpgradeReconciler(t), + func() *v1alpha1.RollingUpgrade { + rollingUpgrade := createRollingUpgrade() + rollingUpgrade.Spec.Strategy.MaxUnavailable = intstr.IntOrString{StrVal: "100%"} + return rollingUpgrade + }(), + createASGClient(), + createNodeSlice(), + nil, + 3, + }, + { + "Multiple Batches", + createRollingUpgradeReconciler(t), + func() *v1alpha1.RollingUpgrade { + rollingUpgrade := createRollingUpgrade() + rollingUpgrade.Spec.Strategy.MaxUnavailable = intstr.IntOrString{StrVal: "100%"} + return rollingUpgrade + }(), + createASGClient(), + createNodeSlice(), + nil, + 1, + }, + { + "Multiple Batches with some overflow", + createRollingUpgradeReconciler(t), + func() *v1alpha1.RollingUpgrade { + rollingUpgrade := createRollingUpgrade() + rollingUpgrade.Spec.Strategy.MaxUnavailable = intstr.IntOrString{StrVal: "100%"} + return rollingUpgrade + }(), + createASGClient(), + createNodeSlice(), + nil, + 2, + }, + } + for _, test := range tests { + awsprovider.InstanceStandByLimit = test.InstanceStandByLimit + rollupCtx := createRollingUpgradeContext(test.Reconciler) + rollupCtx.RollingUpgrade = test.RollingUpgrade + rollupCtx.Cloud.ScalingGroups = test.AsgClient.autoScalingGroups + rollupCtx.Cloud.ClusterNodes = test.ClusterNodes + rollupCtx.Auth.AmazonClientSet.AsgClient = test.AsgClient + + batch := test.AsgClient.autoScalingGroups[0].Instances + actualValue := 
rollupCtx.SetBatchStandBy(awsprovider.GetInstanceIDs(batch)) + if actualValue != test.ExpectedValue { + t.Errorf("Test Description: %s \n expected value: %v, actual value: %v", test.TestDescription, test.ExpectedValue, actualValue) + } + } +} + +func TestIgnoreDrainFailuresAndDrainTimeout(t *testing.T) { + var tests = []struct { + TestDescription string + Reconciler *RollingUpgradeReconciler + RollingUpgrade *v1alpha1.RollingUpgrade + AsgClient *MockAutoscalingGroup + ClusterNodes []*corev1.Node + ExpectedStatusValue string + }{ + { + "CR spec has IgnoreDrainFailures as nil, so default false should be considered", + createRollingUpgradeReconciler(t), + createRollingUpgrade(), + createASGClient(), + createNodeSlice(), + v1alpha1.StatusComplete, + }, + { + "CR spec has IgnoreDrainFailures as true, so default false should not be considered", + createRollingUpgradeReconciler(t), + func() *v1alpha1.RollingUpgrade { + rollingUpgrade := createRollingUpgrade() + ignoreDrainFailuresValue := true + rollingUpgrade.Spec.IgnoreDrainFailures = &ignoreDrainFailuresValue + return rollingUpgrade + }(), + createASGClient(), + createNodeSlice(), + v1alpha1.StatusComplete, + }, + { + "CR spec has DrainTimeout as nil, so default value of 900 should be considered", + createRollingUpgradeReconciler(t), + createRollingUpgrade(), + createASGClient(), + createNodeSlice(), + v1alpha1.StatusComplete, + }, + { + "CR spec has DrainTimeout as 1800, so default value of 900 should not be considered", + createRollingUpgradeReconciler(t), + func() *v1alpha1.RollingUpgrade { + rollingUpgrade := createRollingUpgrade() + drainTimeoutValue := 1800 + rollingUpgrade.Spec.Strategy.DrainTimeout = &drainTimeoutValue + return rollingUpgrade + }(), + createASGClient(), + createNodeSlice(), + v1alpha1.StatusComplete, + }, + } + for _, test := range tests { + rollupCtx := createRollingUpgradeContext(test.Reconciler) + rollupCtx.RollingUpgrade = test.RollingUpgrade + rollupCtx.Cloud.ScalingGroups = test.AsgClient.autoScalingGroups + rollupCtx.Cloud.ClusterNodes = test.ClusterNodes + rollupCtx.Auth.AmazonClientSet.AsgClient = test.AsgClient + + err := rollupCtx.RotateNodes() + if err != nil { + t.Errorf("Test Description: %s \n error: %v", test.TestDescription, err) + } + if rollupCtx.RollingUpgrade.CurrentStatus() != test.ExpectedStatusValue { + t.Errorf("Test Description: %s \n expected value: %s, actual value: %s", test.TestDescription, test.ExpectedStatusValue, rollupCtx.RollingUpgrade.CurrentStatus()) + } + } +} diff --git a/coverage.txt b/coverage.txt new file mode 100644 index 00000000..81cf37dc --- /dev/null +++ b/coverage.txt @@ -0,0 +1,315 @@ +mode: set +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:59.20,62.2 2 0 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:65.68,67.50 2 0 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:89.2,89.37 1 0 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:67.50,76.17 3 0 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:83.3,85.24 3 0 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:76.17,77.75 1 0 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:77.75,79.5 1 0 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:79.10,81.5 1 0 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:86.8,88.3 1 0 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:92.115,94.39 2 0 
+github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:116.2,116.27 1 0 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:94.39,103.17 3 0 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:110.3,112.24 3 0 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:103.17,104.75 1 0 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:104.75,106.5 1 0 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:106.10,108.5 1 0 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:113.8,115.3 1 0 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:119.55,121.2 1 0 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:123.68,125.2 1 0 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:128.81,129.42 1 1 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:129.42,131.3 1 1 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:131.8,133.45 2 1 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:142.3,143.36 2 1 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:165.3,165.38 1 1 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:133.45,138.4 4 1 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:138.9,140.4 1 1 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:143.36,152.18 3 1 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:159.4,161.25 3 1 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:152.18,153.76 1 1 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:153.76,155.6 1 1 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:155.11,157.6 1 0 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:162.9,164.4 1 1 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:169.50,171.2 1 1 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:173.46,175.2 1 1 +github.com/keikoproj/upgrade-manager/controllers/common/metrics.go:177.43,179.2 1 1 +github.com/keikoproj/upgrade-manager/controllers/common/utils.go:24.55,25.29 1 0 +github.com/keikoproj/upgrade-manager/controllers/common/utils.go:30.2,30.14 1 0 +github.com/keikoproj/upgrade-manager/controllers/common/utils.go:25.29,26.33 1 0 +github.com/keikoproj/upgrade-manager/controllers/common/utils.go:26.33,28.4 1 0 +github.com/keikoproj/upgrade-manager/controllers/common/utils.go:33.27,34.11 1 0 +github.com/keikoproj/upgrade-manager/controllers/common/utils.go:37.2,37.10 1 0 +github.com/keikoproj/upgrade-manager/controllers/common/utils.go:34.11,36.3 1 0 +github.com/keikoproj/upgrade-manager/controllers/common/utils.go:40.27,41.11 1 0 +github.com/keikoproj/upgrade-manager/controllers/common/utils.go:44.2,44.10 1 0 +github.com/keikoproj/upgrade-manager/controllers/common/utils.go:41.11,43.3 1 0 +github.com/keikoproj/upgrade-manager/controllers/common/utils.go:47.58,49.45 2 0 +github.com/keikoproj/upgrade-manager/controllers/common/utils.go:53.2,53.15 1 0 +github.com/keikoproj/upgrade-manager/controllers/common/utils.go:49.45,52.3 2 0 +github.com/keikoproj/upgrade-manager/controllers/cloud.go:43.97,48.2 1 1 +github.com/keikoproj/upgrade-manager/controllers/cloud.go:50.44,53.16 2 1 +github.com/keikoproj/upgrade-manager/controllers/cloud.go:56.2,59.16 3 1 +github.com/keikoproj/upgrade-manager/controllers/cloud.go:62.2,65.16 3 1 +github.com/keikoproj/upgrade-manager/controllers/cloud.go:68.2,70.12 2 1 
+github.com/keikoproj/upgrade-manager/controllers/cloud.go:53.16,55.3 1 0 +github.com/keikoproj/upgrade-manager/controllers/cloud.go:59.16,61.3 1 0 +github.com/keikoproj/upgrade-manager/controllers/cloud.go:65.16,67.3 1 0 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:12.153,15.2 2 1 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:18.104,21.28 3 1 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:24.2,24.47 1 1 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:21.28,23.3 1 0 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:28.100,29.30 1 1 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:29.30,30.26 1 0 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:30.26,32.4 1 0 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:37.84,39.55 1 0 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:48.2,54.4 1 0 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:39.55,40.33 1 0 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:40.33,46.4 3 0 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:58.164,69.2 2 1 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:72.139,75.47 2 1 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:87.2,89.48 3 1 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:75.47,83.3 2 1 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:83.8,85.3 1 1 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:89.48,99.3 8 1 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:99.8,99.50 1 1 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:99.50,102.26 3 1 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:102.26,107.4 4 1 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:113.118,115.2 1 1 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:117.147,119.46 2 1 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:127.2,127.25 1 1 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:119.46,123.3 1 1 +github.com/keikoproj/upgrade-manager/controllers/metrics.go:123.8,126.3 2 1 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:75.106,79.16 4 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:89.2,89.48 1 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:96.2,97.68 2 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:103.2,103.47 1 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:108.2,116.106 3 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:122.2,122.51 1 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:133.2,133.16 1 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:139.2,139.107 1 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:145.2,164.34 5 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:172.2,172.48 1 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:178.2,178.73 1 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:79.16,80.30 1 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:85.3,85.28 1 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:80.30,84.4 3 0 
+github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:89.48,93.3 3 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:97.68,101.3 3 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:103.47,105.3 1 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:116.106,119.3 2 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:122.51,125.112 3 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:130.3,130.14 1 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:125.112,129.4 3 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:133.16,136.3 1 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:139.107,142.3 2 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:142.8,144.3 1 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:164.34,168.4 3 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:172.48,176.3 3 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:182.77,189.2 1 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:192.76,194.46 1 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:194.46,196.10 2 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:202.4,202.15 1 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:196.10,201.5 4 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:204.46,206.10 2 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:212.4,212.15 1 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:206.10,211.5 4 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:214.46,216.10 2 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:222.4,222.15 1 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:216.10,221.5 4 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:228.58,229.12 1 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:229.12,232.3 2 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:236.90,238.80 2 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:238.80,240.3 1 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:244.69,248.60 3 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:252.2,252.26 1 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:255.2,255.21 1 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:248.60,251.3 2 0 +github.com/keikoproj/upgrade-manager/controllers/rollingupgrade_controller.go:252.26,254.3 1 0 +github.com/keikoproj/upgrade-manager/controllers/script_runner.go:39.55,43.2 1 0 +github.com/keikoproj/upgrade-manager/controllers/script_runner.go:45.61,56.2 2 1 +github.com/keikoproj/upgrade-manager/controllers/script_runner.go:58.86,64.16 5 1 +github.com/keikoproj/upgrade-manager/controllers/script_runner.go:67.2,67.25 1 1 +github.com/keikoproj/upgrade-manager/controllers/script_runner.go:64.16,66.3 1 1 
+github.com/keikoproj/upgrade-manager/controllers/script_runner.go:70.65,72.18 2 0 +github.com/keikoproj/upgrade-manager/controllers/script_runner.go:76.2,77.16 2 0 +github.com/keikoproj/upgrade-manager/controllers/script_runner.go:82.2,84.12 2 0 +github.com/keikoproj/upgrade-manager/controllers/script_runner.go:72.18,74.3 1 0 +github.com/keikoproj/upgrade-manager/controllers/script_runner.go:77.16,80.3 2 0 +github.com/keikoproj/upgrade-manager/controllers/script_runner.go:87.60,89.18 2 0 +github.com/keikoproj/upgrade-manager/controllers/script_runner.go:93.2,94.16 2 0 +github.com/keikoproj/upgrade-manager/controllers/script_runner.go:99.2,100.12 2 0 +github.com/keikoproj/upgrade-manager/controllers/script_runner.go:89.18,91.3 1 0 +github.com/keikoproj/upgrade-manager/controllers/script_runner.go:94.16,97.3 2 0 +github.com/keikoproj/upgrade-manager/controllers/script_runner.go:103.61,105.18 2 0 +github.com/keikoproj/upgrade-manager/controllers/script_runner.go:109.2,110.16 2 0 +github.com/keikoproj/upgrade-manager/controllers/script_runner.go:115.2,116.12 2 0 +github.com/keikoproj/upgrade-manager/controllers/script_runner.go:105.18,107.3 1 0 +github.com/keikoproj/upgrade-manager/controllers/script_runner.go:110.16,113.3 2 0 +github.com/keikoproj/upgrade-manager/controllers/script_runner.go:119.60,121.18 2 0 +github.com/keikoproj/upgrade-manager/controllers/script_runner.go:125.2,126.16 2 0 +github.com/keikoproj/upgrade-manager/controllers/script_runner.go:131.2,132.12 2 0 +github.com/keikoproj/upgrade-manager/controllers/script_runner.go:121.18,123.3 1 0 +github.com/keikoproj/upgrade-manager/controllers/script_runner.go:126.16,129.3 2 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:65.53,71.40 3 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:76.2,76.43 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:83.2,86.59 2 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:89.2,100.32 3 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:107.2,108.57 2 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:112.2,112.12 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:71.40,73.3 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:76.43,81.3 4 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:86.59,88.3 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:100.32,105.3 4 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:108.57,110.3 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:115.95,126.30 5 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:130.2,130.14 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:216.2,224.139 2 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:229.2,229.122 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:234.2,234.69 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:298.2,299.12 2 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:304.2,304.9 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:372.2,372.18 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:126.30,128.3 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:131.40,132.32 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:147.3,149.36 2 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:171.3,171.32 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:185.3,186.29 2 0 
+github.com/keikoproj/upgrade-manager/controllers/upgrade.go:190.3,190.79 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:192.39,193.32 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:207.3,209.111 3 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:132.32,135.19 3 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:140.4,144.115 2 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:135.19,137.13 2 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:149.36,152.112 2 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:158.4,159.62 2 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:164.4,165.20 2 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:152.112,156.5 3 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:159.62,161.5 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:166.9,168.4 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:171.32,174.19 3 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:178.4,181.124 2 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:174.19,176.13 2 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:186.29,189.4 2 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:193.32,196.19 3 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:200.4,204.115 2 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:196.19,198.13 2 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:209.111,213.4 3 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:224.139,227.3 2 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:229.122,232.3 2 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:234.69,235.32 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:235.32,238.19 3 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:242.4,252.14 3 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:238.19,240.13 2 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:252.14,259.65 3 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:264.5,264.99 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:280.5,283.66 2 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:288.5,291.65 2 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:259.65,261.6 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:264.99,270.160 3 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:270.160,272.52 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:272.52,275.8 2 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:283.66,285.6 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:291.65,293.6 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:299.12,302.3 2 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:305.43,309.20 3 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:311.14,315.32 3 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:362.3,362.54 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:364.45,370.19 3 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:315.32,318.19 3 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:322.4,337.59 4 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:344.4,350.69 3 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:355.4,359.135 3 0 
+github.com/keikoproj/upgrade-manager/controllers/upgrade.go:318.19,320.13 2 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:337.59,342.5 3 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:350.69,352.5 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:375.104,385.55 4 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:394.2,394.22 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:399.2,399.76 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:433.2,433.16 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:385.55,386.152 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:386.152,388.122 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:388.122,390.5 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:394.22,396.3 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:399.76,400.51 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:405.3,405.36 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:408.3,408.34 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:400.51,401.141 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:401.141,403.5 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:405.36,407.4 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:410.8,410.92 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:410.92,411.51 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:417.3,419.20 3 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:422.3,422.34 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:428.3,428.38 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:431.3,431.36 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:411.51,412.141 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:412.141,414.5 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:419.20,421.4 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:422.34,424.37 2 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:424.37,426.5 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:428.38,430.4 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:436.88,445.111 2 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:450.2,450.39 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:466.2,466.49 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:522.2,522.14 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:445.111,447.3 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:450.39,452.18 2 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:456.3,460.51 2 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:452.18,455.4 2 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:460.51,463.4 2 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:466.49,467.46 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:470.3,472.63 3 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:467.46,469.4 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:472.63,474.4 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:475.8,475.47 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:475.47,476.37 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:480.3,488.70 2 1 
+github.com/keikoproj/upgrade-manager/controllers/upgrade.go:492.3,492.67 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:476.37,478.4 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:488.70,490.4 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:492.67,494.4 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:494.9,494.74 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:494.74,496.4 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:498.8,498.53 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:498.53,499.37 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:503.3,511.70 2 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:515.3,515.67 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:499.37,501.4 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:511.70,513.4 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:515.67,517.4 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:517.9,517.74 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:517.74,519.4 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:525.62,533.50 3 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:538.2,538.21 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:543.2,545.14 3 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:533.50,534.36 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:534.36,536.4 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:538.21,542.3 3 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:548.58,557.56 3 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:563.2,563.97 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:571.2,571.41 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:576.2,576.13 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:557.56,560.3 2 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:563.97,564.45 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:564.45,566.187 2 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:566.187,568.5 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:571.41,574.3 2 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:579.80,581.37 2 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:592.2,592.25 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:597.2,597.33 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:601.2,601.23 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:581.37,582.46 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:582.46,584.4 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:584.9,586.4 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:587.8,589.3 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:592.25,594.3 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:597.33,599.3 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:604.81,614.2 6 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:616.49,623.32 4 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:623.32,625.3 1 0 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:625.8,631.3 3 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:635.77,638.38 3 1 
+github.com/keikoproj/upgrade-manager/controllers/upgrade.go:643.2,643.12 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:638.38,639.89 1 1 +github.com/keikoproj/upgrade-manager/controllers/upgrade.go:639.89,641.4 1 0 diff --git a/deploy/rolling-upgrade-controller-deploy.yaml b/deploy/rolling-upgrade-controller-deploy.yaml deleted file mode 100644 index a9ebb744..00000000 --- a/deploy/rolling-upgrade-controller-deploy.yaml +++ /dev/null @@ -1,86 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rolling-upgrade-sa - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: rolling-upgrade-sa-role -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-admin -subjects: -- kind: ServiceAccount - name: rolling-upgrade-sa - namespace: kube-system ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: rolling-upgrade-controller - name: rolling-upgrade-controller - namespace: kube-system -spec: - replicas: 1 - selector: - matchLabels: - app: rolling-upgrade-controller - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - type: RollingUpdate - template: - metadata: - creationTimestamp: null - labels: - app: rolling-upgrade-controller - spec: - containers: - - image: keikoproj/rolling-upgrade-controller:latest - imagePullPolicy: Always - name: rolling-upgrade-controller - resources: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 100m - memory: 300Mi - ports: - - containerPort: 8080 - name: metrics - terminationMessagePath: /dev/termination-log - terminationMessagePolicy: File - dnsPolicy: ClusterFirst - nodeSelector: - kubernetes.io/role: master - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - restartPolicy: Always - schedulerName: default-scheduler - securityContext: {} - serviceAccount: rolling-upgrade-sa - serviceAccountName: rolling-upgrade-sa - terminationGracePeriodSeconds: 30 ---- -kind: Service -apiVersion: v1 -metadata: - labels: - iks-metric: http-metrics - name: rolling-upgrade-controller-service - namespace: kube-system -spec: - ports: - - name: iks-metric - port: 8080 - protocol: TCP - targetPort: 8080 - selector: - app: rolling-upgrade-controller \ No newline at end of file diff --git a/docs/RollingUpgradeDesign.png b/docs/RollingUpgradeDesign.png deleted file mode 100644 index a6386e86..00000000 Binary files a/docs/RollingUpgradeDesign.png and /dev/null differ diff --git a/docs/faq.md b/docs/faq.md deleted file mode 100644 index d63a9847..00000000 --- a/docs/faq.md +++ /dev/null @@ -1,53 +0,0 @@ -# Frequently Asked Questions - -## What happens when: - -_1. Draining of a node fails?_ - -RollingUpgrade logs will show the reason why the drain failed (output of `kubectl drain`). It then marks the custom resource status as "error". After marking the object status as "error", the rolling-upgrade controller will ignore the object. - -Users will have to manually "fix" the condition that was causing the drain to fail and remove the "error" status on the custom resource to resume the rolling updates. - -_2. Node is drained but the postDrain script fails?_ - -RollingUpgrade controller will "uncordon" the node, mark the custom resource status as "error" and stop processing the rolling-update. - -Users will have to rectify the postDrain script and remove the "error" status so as to resume the rolling-updates. - -_3. 
RollingUpgrade controller gets terminated while it was performing the rolling-updates?_ - -- RollingUpgrade controller is run as a deployment. Therefore, if the pod dies, it will get scheduled again by Kubernetes. -- The RollingUpgrade custom resource objects that were being processed will get sent to the "Reconcile" method of the newly running pod. -- Then, the rolling-upgrade controller resumes performing the rolling-updates from where it left off. Care is taken to ensure that if any nodes had already been updated, they won't go through the update again. - -## Other details - -_1. Instead of the rolling-upgrade controller, why not simply run `kops rolling-update` in a container on the master node?_ - -- kops does not provide additional hooks for `preDrain`, `postDrain` or `postTerminate`, etc. -- Moreover, kops requires additional IAM privileges (such as S3 access) even when running a rolling-update. - -_2. What command line tools can be used in the scripts?_ - -The rolling-upgrade-controller comes with busybox and kubectl binaries in it. - -_3. What types of testing has rolling-upgrade gone through?_ - -- RollingUpgrade has unit tests that ensure that the functionality is robust and can be tested without a real environment. Simply run `make test`. -- RollingUpgrade has been tested for upgrades of 100s of ASGs across several different Kubernetes clusters in dev, pre-prod and production environments. -- The typical sequence of steps for the testing was as follows: - - Create a cluster with multiple different IGs using kops. - - Run `kops edit cluster` to update the Kubernetes version or `kops edit ig` to modify the ASG (instance type). - - Run `kops update cluster` to ensure that the LaunchConfigurations have been updated. - - Create a new RollingUpgrade CR in the cluster for each ASG. - -_4. Are there any best practices for running RollingUpgrade?_ - -- Ensure that DNS resolution does not fail in the cluster during upgrades. RollingUpgrade requires making calls to ec2.amazonaws.com or autoscaling.amazonaws.com. -- Ensure that the rolling-upgrade controller is run as a Kubernetes deployment. - -_5. What are the specific cases where the preDrain, postDrain, postDrainWait and postTerminate scripts could be used?_ - -- postDrain: This could be used to ensure that all the pods on that node (or maybe all pods in the cluster) have been successfully scheduled on a different node (or nodes). Additional sleeps could be added to ensure that this condition is being met. In case there are no other nodes in the cluster to run the pods, this script could also add more nodes in the cluster (or wait for cluster-autoscaler to spin up the nodes). Also, any additional "draining" actions required on the node could be performed here. E.g., the node could be taken out of the target group of ALB by adding an additional label to the node. -- postDrainWait: This could be used to ensure that all pods that have migrated to different nodes are actually running and serving client requests. -- postTerminate: This is executed after an ec2 instance is terminated and the `nodeIntervalSeconds` have passed. In the postTerminate script, additional checks such as ensuring that all the nodes (including the newly created one) have successfully joined the cluster can be performed.
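For reference, a minimal sketch of how the postDrain and postTerminate hooks described above fit together in a RollingUpgrade spec. The field layout follows the examples/pre_post_drain.yaml manifest removed later in this diff; the label key used to take the node out of rotation is purely illustrative and not part of the controller.

``` yaml
apiVersion: upgrademgr.keikoproj.io/v1alpha1
kind: RollingUpgrade
metadata:
  generateName: rollingupgrade-sample-
spec:
  asgName: my-asg-1
  nodeIntervalSeconds: 300
  postDrain:
    script: |
      # Illustrative only: label the drained node so external tooling (e.g. an ALB controller)
      # can take it out of rotation. The label key below is a placeholder, not a controller convention.
      kubectl label node ${INSTANCE_NAME} upgrade-manager.example/out-of-rotation=true --overwrite
    waitSeconds: 90
    postWaitScript: |
      # Confirm that pods evicted from the node have been rescheduled elsewhere.
      kubectl get pods --all-namespaces --field-selector spec.nodeName=${INSTANCE_NAME}
  postTerminate:
    script: |
      # After termination and nodeIntervalSeconds, verify the replacement node has joined the cluster.
      kubectl get nodes
```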
diff --git a/docs/step-by-step-example.md b/docs/step-by-step-example.md deleted file mode 100644 index 11e6b227..00000000 --- a/docs/step-by-step-example.md +++ /dev/null @@ -1,193 +0,0 @@ -# Reference Installation and Test - -This guide will provide a step by step walkthrough for creating a Kubernetes cluster with Instance Groups and then upgrading those using this rolling-upgrade-controller. - -## Create a Kubernetes cluster using kops - -### Prerequisites - -* Ensure that you have AWS credentials downloaded. A simple test is to run `aws s3 ls` and ensure that you have some output. -* Ensure that you have at least 1 S3 bucket ready for using the [kops state store](https://github.com/kubernetes/kops/blob/master/docs/state.md) -* Ensure that the kops executable is ready to be used. A simple test for this is to run `kops version` and ensure that there is some output. -* Ensure that the kubectl executable is ready to be used. A simple tests for this is to run `kubectl version` and ensure that output shows the client version. -* Copy the following script into a local file; say `/tmp/create_cluster.sh` - -``` bash -#!/bin/bash - -set -ex - -export CLUSTER_NAME=test-cluster-kops.cluster.k8s.local - -if [[ -z "${KOPS_STATE_STORE}" ]]; then - echo "The KOPS_STATE_STORE environment variable needs to be set" - exit 1 -fi - -# Create cluster config. -kops create cluster \ ---name=${CLUSTER_NAME} \ ---state=${KOPS_STATE_STORE} \ ---node-count=2 \ ---zones=us-west-2a,us-west-2b,us-west-2c \ ---master-zones=us-west-2c \ ---node-size=c5.large \ ---master-size=c5.large \ ---master-count=1 \ ---networking=calico \ ---topology=private \ ---ssh-public-key=~/.ssh/id_rsa.pub \ ---kubernetes-version=1.13.9 - -kops create secret --name ${CLUSTER_NAME} sshpublickey admin -i ~/.ssh/id_rsa.pub - -kops update cluster ${CLUSTER_NAME} --yes - -``` - -### Actually create the cluster - -* Create the cluster by running the script above as follows: - * Edit the cluster name by modifying the CLUSTER_NAME variable in the scrip above if needed. - * Change `my-bucket-name` below with the actual name of your s3 bucket. - -`$ KOPS_STATE_STORE=s3://my-bucket-name /tmp/create_cluster.sh` - -* This takes several minutes for the cluster to actually come up. -* Ensure that the cluster is up by running the command `kubectl cluster-info`. You should see some information like the url for the master and maybe endpoint for KubeDNS. -* Congratulations! Your Kubernetes cluster is ready and can be now used for testing. - -## Run the rolling-upgrade-manager - -### Update the IAM role for the rolling-upgrade-controller - -* Before we actually install the rolling-upgrade-controller, we need make sure that the IAM that will be used has enough privileges for the rolling-upgrade-controller to work. -* To do this, let's edit the IAM role used by the master nodes in the above cluster. -* Copy the following into a file; say `/tmp/rolling-upgrade-policy` - -``` json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "ec2:TerminateInstances", - "autoscaling:DescribeAutoScalingGroups", - "autoscaling:TerminateInstanceInAutoScalingGroup" - ], - "Resource": [ - "*" - ] - } - ] -} -``` - -* Create an IAM policy using the document above. 
- -``` -$ aws iam create-policy --policy-name rolling-upgrade-policy --policy-document file:///tmp/rolling-upgrade-policy -``` - -* This will output a bunch of details such as - -``` json -{ - "Policy": { - "PolicyName": "rolling-upgrade-policy", - "PermissionsBoundaryUsageCount": 0, - "CreateDate": "2019-01-01T01:01:01Z", - "AttachmentCount": 0, - "IsAttachable": true, - "PolicyId": "ABCDZ1243FDS5432J", - "DefaultVersionId": "v1", - "Path": "/", - "Arn": "arn:aws:iam::0123456789:policy/rolling-upgrade-policy", - "UpdateDate": "2019-01-01T01:01:01Z" - } -} -``` - -* The "Arn" field from the above output is required in the next command. -* Attach this policy to the role of the Kubernetes master nodes using the Arn from above. The role-name is `masters.` - -``` -$ aws iam attach-role-policy --role-name masters.test-cluster-kops.cluster.k8s.local --policy-arn arn:aws:iam::0123456789:policy/rolling-upgrade-policy -``` - -### Install the rolling-upgrade-controller - -* Install the CRD using: `$ kubectl apply -f https://raw.githubusercontent.com/keikoproj/upgrade-manager/master/config/crd/bases/upgrademgr.keikoproj.io_rollingupgrades.yaml` -* Install the controller using: -`$ kubectl create -f https://raw.githubusercontent.com/keikoproj/upgrade-manager/master/deploy/rolling-upgrade-controller-deploy.yaml` -* Ensure that the rolling-upgrade-controller deployment is running. - -## Actually perform rolling-upgrade of one InstanceGroup - -### Update the nodes AutoScalingGroup - -* Update the `nodes` instance-group so that it needs a rolling-upgrade. The following command will open the specification for the nodes instance-group in an editor. Change the instance type from `c4.large` to `r4.large`. -`$ KOPS_STATE_STORE=s3://my-bucket-name kops edit ig nodes` - -* The run kops upgrade to make these changes persist and have kops modify the ASG's launch configuration -`$ KOPS_STATE_STORE=s3://my-bucket-name kops update cluster --yes` - -### Create the RollingUpgrade custom-resource (CR) that will actually do the rolling-upgrade. - -* Run the following script: - -``` bash -#!/bin/bash - -set -ex - -cat << EOF > /tmp/crd.yaml -apiVersion: upgrademgr.keikoproj.io/v1alpha1 -kind: RollingUpgrade -metadata: - generateName: rollingupgrade-sample- -spec: - asgName: nodes.test-cluster-kops.cluster.k8s.local - nodeIntervalSeconds: 300 - preDrain: - script: | - kubectl get pods --all-namespaces --field-selector spec.nodeName=${INSTANCE_NAME} - postDrain: - script: | - echo "Pods at PostDrain:" - kubectl get pods --all-namespaces --field-selector spec.nodeName=${INSTANCE_NAME} - waitSeconds: 90 - postWaitScript: | - echo "Pods at postWait:" - kubectl get pods --all-namespaces --field-selector spec.nodeName=${INSTANCE_NAME} - postTerminate: - script: | - echo "PostTerminate:" - kubectl get pods --all-namespaces -EOF - -kubectl create -f /tmp/crd.yaml -``` - -### Ensure nodes are getting updated - -* As soon as the above CR is submitted, the rolling-upgrade-controller will pick it up and start the rolling-upgrade process. -* There are multiple ways to ensure that rolling-upgrades are actually happening. - * Watch the AWS console. Existing nodes should be seen getting Terminated. New nodes coming up should be of type r4.large. - * Run `kubectl get nodes`. Some node will either have SchedulingDisabled or it could be terminated and new node should be seen coming up. - * Check the status in the actual CR. It has details of how many nodes are going to be upgraded and how many have been completed. 
`$ kubectl get rollingupgrade -o yaml` -* Checks the logs of the rolling-upgrade-controller for minute details of how the CR is being processed. - -## Deleting the cluster - -* Before deleting the cluster, the policy that was created explicitly will have to be deleted. - -``` -$ aws iam detach-role-policy --role-name masters.test-cluster-kops.cluster.k8s.local --policy-arn arn:aws:iam::0123456789:policy/rolling-upgrade-policy -$ aws iam delete-policy --policy-arn arn:aws:iam::0123456789:policy/rolling-upgrade-policy -``` - -* Now delete the cluster - -`$ KOPS_STATE_STORE=s3://my-bucket-name kops delete cluster test-cluster-kops.cluster.k8s.local --yes` diff --git a/examples/basic.yaml b/examples/basic.yaml deleted file mode 100644 index 93e96117..00000000 --- a/examples/basic.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: upgrademgr.keikoproj.io/v1alpha1 -kind: RollingUpgrade -metadata: - generateName: rollingupgrade-sample- -spec: - asgName: my-asg - nodeIntervalSeconds: 300 - postDrain: - waitSeconds: 90 diff --git a/examples/pre_post_drain.yaml b/examples/pre_post_drain.yaml deleted file mode 100644 index 6ecdc1d5..00000000 --- a/examples/pre_post_drain.yaml +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: upgrademgr.keikoproj.io/v1alpha1 -kind: RollingUpgrade -metadata: - generateName: rollingupgrade-sample- -spec: - asgName: my-asg-1 - nodeIntervalSeconds: 300 - preDrain: - script: | - kubectl get pods --all-namespaces --field-selector spec.nodeName=${INSTANCE_NAME} - postDrain: - script: | - echo "Pods at PostDrain:" - kubectl get pods --all-namespaces --field-selector spec.nodeName=${INSTANCE_NAME} - waitSeconds: 90 - postWaitScript: | - echo "Pods at postWait:" - kubectl get pods --all-namespaces --field-selector spec.nodeName=${INSTANCE_NAME} - postTerminate: - script: | - echo "PostTerminate:" - kubectl get pods --all-namespaces diff --git a/examples/random_update_strategy.yaml b/examples/random_update_strategy.yaml deleted file mode 100644 index 7b80af9e..00000000 --- a/examples/random_update_strategy.yaml +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: upgrademgr.keikoproj.io/v1alpha1 -kind: RollingUpgrade -metadata: - generateName: rollingupgrade-sample- -spec: - asgName: my-asg-1 - nodeIntervalSeconds: 300 - preDrain: - script: | - kubectl get pods --all-namespaces --field-selector spec.nodeName=${INSTANCE_NAME} - postDrain: - script: | - echo "Pods at PostDrain:" - kubectl get pods --all-namespaces --field-selector spec.nodeName=${INSTANCE_NAME} - waitSeconds: 90 - postWaitScript: | - echo "Pods at postWait:" - kubectl get pods --all-namespaces --field-selector spec.nodeName=${INSTANCE_NAME} - postTerminate: - script: | - echo "PostTerminate:" - kubectl get pods --all-namespaces - strategy: - type: "randomUpdate" - maxUnavailable: "100%" - drainTimeout: 120 diff --git a/examples/uniform_across_az_update_strategy.yaml b/examples/uniform_across_az_update_strategy.yaml deleted file mode 100644 index 04f9d2f0..00000000 --- a/examples/uniform_across_az_update_strategy.yaml +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: upgrademgr.keikoproj.io/v1alpha1 -kind: RollingUpgrade -metadata: - generateName: rollingupgrade-sample- -spec: - asgName: my-asg-1 - nodeIntervalSeconds: 300 - preDrain: - script: | - kubectl get pods --all-namespaces --field-selector spec.nodeName=${INSTANCE_NAME} - postDrain: - script: | - echo "Pods at PostDrain:" - kubectl get pods --all-namespaces --field-selector spec.nodeName=${INSTANCE_NAME} - waitSeconds: 90 - postWaitScript: | - echo "Pods at postWait:" - kubectl 
get pods --all-namespaces --field-selector spec.nodeName=${INSTANCE_NAME} - postTerminate: - script: | - echo "PostTerminate:" - kubectl get pods --all-namespaces - strategy: - type: "uniformAcrossAzUpdate" - maxUnavailable: "25%" - drainTimeout: 120 diff --git a/go.mod b/go.mod index b44f0e53..f114b375 100644 --- a/go.mod +++ b/go.mod @@ -1,26 +1,20 @@ module github.com/keikoproj/upgrade-manager -go 1.16 +go 1.15 require ( - github.com/aws/aws-sdk-go v1.36.11 - github.com/cucumber/godog v0.10.0 - github.com/go-logr/logr v0.1.0 + github.com/aws/aws-sdk-go v1.38.24 + github.com/go-logr/logr v0.3.0 github.com/keikoproj/aws-sdk-go-cache v0.0.0-20201118182730-f6f418a4e2df - github.com/keikoproj/inverse-exp-backoff v0.0.0-20201007213207-e4a3ac0f74ab - github.com/keikoproj/kubedog v0.0.1 - github.com/onsi/ginkgo v1.14.2 - github.com/onsi/gomega v1.10.4 - github.com/prometheus/client_golang v1.0.0 + github.com/onsi/gomega v1.10.2 + github.com/pkg/errors v0.9.1 + github.com/prometheus/client_golang v1.7.1 github.com/sirupsen/logrus v1.6.0 - go.uber.org/zap v1.16.0 - golang.org/x/net v0.0.0-20201216054612-986b41b23924 - gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.17.15 - k8s.io/apiextensions-apiserver v0.17.15 // indirect - k8s.io/apimachinery v0.17.15 - k8s.io/client-go v0.17.15 - k8s.io/utils v0.0.0-20191218082557-f07c713de883 // indirect - sigs.k8s.io/controller-runtime v0.4.0 - sigs.k8s.io/controller-tools v0.2.4 // indirect + go.uber.org/zap v1.15.0 + k8s.io/api v0.20.4 + k8s.io/apimachinery v0.20.4 + k8s.io/client-go v0.20.4 + k8s.io/kubectl v0.20.4 + sigs.k8s.io/controller-runtime v0.7.0 + ) diff --git a/go.sum b/go.sum index 3319a429..b2aebc80 100644 --- a/go.sum +++ b/go.sum @@ -1,24 +1,77 @@ -cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.51.0 h1:PvKAVQWCtlGUSlZkGW3QLelKaWq7KYv/MW1EboG8bfM= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0 h1:3ithwDMr7/3vpAMXiH+ZQnYbuIsh+OPhUPMFC9enmn0= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod 
h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.6 h1:5YWtOnckcudzIw8lPPBcWOnmIFWMtHci1ZWAZulMSx0= +github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest v0.11.1 h1:eVvIXUKiTgv++6YnWb42DUA1YL7qDugnKP0HljexdnQ= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.2 h1:O1X4oexUxnZCaEUGsvMnr8ZGj8HI37tNezwY4npRqA0= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= github.com/Azure/go-autorest/logger v0.1.0/go.mod 
h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd h1:sjQovDkwrZp8u+gxLtPgKGjk5hCxuy2hrRejBTA9xFU= +github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -26,86 +79,100 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aslakhellesoy/gox v1.0.100/go.mod 
h1:AJl542QsKKG96COVsv0N74HHzVQgDIQPceVUh1aeU2M= -github.com/aws/aws-sdk-go v1.33.8 h1:2/sOfb9oPHTRZ0lxinoaTPDcYwNa1H/SpKP4nVRBwmg= -github.com/aws/aws-sdk-go v1.33.8/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.35.7/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= -github.com/aws/aws-sdk-go v1.36.11 h1:6lVRjsmRpQwq58+YHBbBe7BZuY3l6onDBLN4twOXT7U= -github.com/aws/aws-sdk-go v1.36.11/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/aws/aws-sdk-go v1.36.24 h1:uVuio0zA5ideP3DGZDpIoExQJd0WcoNUVlNZaKwBnf8= +github.com/aws/aws-sdk-go v1.36.24/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= +github.com/aws/aws-sdk-go v1.38.24 h1:zbKHDxFepE77ihVMZ+wZ62Ci646zkorN8rB5s4fj4kU= +github.com/aws/aws-sdk-go v1.38.24/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod 
h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cucumber/gherkin-go/v11 v11.0.0 h1:cwVwN1Qn2VRSfHZNLEh5x00tPBmZcjATBWDpxsR5Xug= -github.com/cucumber/gherkin-go/v11 v11.0.0/go.mod h1:CX33k2XU2qog4e+TFjOValoq6mIUq0DmVccZs238R9w= -github.com/cucumber/godog v0.10.0 h1:W01u1+o8bRpgqJRLrclN3iAanU1jAao+TwOMoSV9g1Y= -github.com/cucumber/godog v0.10.0/go.mod h1:0Q+MOUg8Z9AhzLV+nNMbThQ2x1b17yYwGyahApTLjJA= -github.com/cucumber/messages-go/v10 v10.0.1/go.mod h1:kA5T38CBlBbYLU12TIrJ4fk4wSkVVOgyh7Enyy8WnSg= -github.com/cucumber/messages-go/v10 v10.0.3 h1:m/9SD/K/A15WP7i1aemIv7cwvUw+viS51Ui5HBw1cdE= -github.com/cucumber/messages-go/v10 v10.0.3/go.mod h1:9jMZ2Y8ZxjLY6TG2+x344nt5rXstVVDYSdS5ySfI1WY= -github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize 
v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= +github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680 h1:ZktWZesgun21uEDrwW7iEV1zPCGQldM2atlJZ3TdvVM= +github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod 
h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= -github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.3.0 h1:q4c+kbcR0d5rSurhBR8dIgieOaYpXtsdTYfx22Cu6rs= +github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/zapr v0.2.0 h1:v6Ji8yBW77pva6NkJKQdHLAJKrIJKRHz0RXwPqCHSR4= +github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -118,11 +185,13 @@ github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+ github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= @@ -136,6 +205,7 @@ github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nA github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod 
h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= @@ -145,111 +215,130 @@ github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dp github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gobuffalo/flect v0.1.5 h1:xpKq9ap8MbYfhuPCF0dBH854Gp9CxZjr/IocxELFflo= -github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= -github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= -github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 h1:u4bArs140e9+AfE52mFHOXVFnOSBJBRlzTHrOPLOIhE= -github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= +github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= +github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/go-cmp v0.5.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= -github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic v0.5.1 h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwuTCM= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= 
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/go-immutable-radix v1.2.0 h1:l6UW37iCXwZkZoAbEYnptSHVE/cQ5bOTPYG5W3vf9+8= -github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-memdb v1.2.1 h1:wI9btDjYUOJJHTCnRlAG/TkRyD/ij7meJMrLK9X31Cc= -github.com/hashicorp/go-memdb v1.2.1/go.mod h1:OSvLJ662Jim8hMM+gWGyhktyWk2xPCnWMc7DWIqtkGA= -github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= 
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10 h1:6q5mVkdH/vYmqngx7kZQTjJ5HRsx+ImorDIEQ+beJgc= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= -github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/karlseguin/ccache v2.0.3+incompatible h1:j68C9tWOROiOLWTS/kCGg9IcJG+ACqn5+0+t8Oh83UU= github.com/karlseguin/ccache v2.0.3+incompatible/go.mod h1:CM9tNPzT6EdRh14+jiW8mEF9mkNZuuE51qmgGYUB93w= @@ -257,51 +346,55 @@ github.com/karlseguin/expect v1.0.1 h1:z4wy4npwwHSWKjGWH85WNJO42VQhovxTCZDSzhjo8 github.com/karlseguin/expect v1.0.1/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8= github.com/keikoproj/aws-sdk-go-cache v0.0.0-20201118182730-f6f418a4e2df h1:5CIVZTNmDF4GwbyQzRGYLoG1mo2LHJSO4UwAUDNpgDw= github.com/keikoproj/aws-sdk-go-cache v0.0.0-20201118182730-f6f418a4e2df/go.mod h1:WuCkHvglMhs9DQnwssll4dy87h352LIfN3qfyk6l6Rg= -github.com/keikoproj/inverse-exp-backoff v0.0.0-20201007213207-e4a3ac0f74ab h1:8/LbUmjJHVF8NZYHwlSWGgI731i0gFkF2posm/sAvB0= -github.com/keikoproj/inverse-exp-backoff v0.0.0-20201007213207-e4a3ac0f74ab/go.mod h1:ziu/tMrrvs8n+AI+HCZBb6wZS609fcMl5ygR2RttEE4= -github.com/keikoproj/kubedog v0.0.1 h1:SKo5g78QvlXx+JniYGvgohsB/5dm6bdj6ccyyUrnLDs= -github.com/keikoproj/kubedog v0.0.1/go.mod h1:8aRJYCL//c+RvycK3qsAfuHqyS1EP7Pa4g8M+t1wO3M= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= 
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= 
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd h1:aY7OQNf2XqY/JQ6qREWamhI/81os/agb2BAGpcx5yWI= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -309,170 +402,192 @@ github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8m github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/ulid v1.3.1/go.mod 
h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M= -github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4= +github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.4 h1:NiTx7EEvBzu9sFOD1zORteLSt3o8gnlvZZwSE9TnY9U= -github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ= +github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= +github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= -github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= +github.com/prometheus/procfs v0.1.3 
h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 
h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 h1:3UeQBvD0TFrlVjOeLOBz+CPAI8dnbqNSVwUwRrkp7vQ= github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0/go.mod h1:IXCdmsXIht47RaVFLEdVnh1t+pgYtTAhQGj73kz+2DM= -github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod 
h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= -go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= +go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
-golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180112015858-5ccada7d0a7b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -481,25 +596,32 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U= -golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201216054612-986b41b23924 h1:QsnDpLLOKwHBBDa8nDws4DYNc/ryVW2vCpxCs09d4PY= -golang.org/x/net v0.0.0-20201216054612-986b41b23924/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -508,49 +630,59 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180117170059-2c42eef0765b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4 h1:5/PjkGUjvEU5Gl6BxmvKRPpqo2uNMv4rcHBMwzk/st8= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20171227012246-e19ae1496984/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -559,64 +691,120 @@ golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac h1:MQEvx39qSf8vyrx3XRaOe+j1UDIzKwkYOVObRgGPVqI= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054 h1:HHeAlu5H9b71C+Fx0K+1dGgVFN1DM1/wz4aoGOA5qS8= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0= -gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= -gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +golang.org/x/xerrors 
v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.1.0 h1:Phva6wqu+xR//Njw6iorylFFgn/z547tw5Ne3HZPQ+k= +gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= 
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 
v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/karlseguin/expect.v1 v1.0.1 h1:9u0iUltnhFbJTHaSIH0EP+cuTU5rafIgmcsEsg2JQFw= gopkg.in/karlseguin/expect.v1 v1.0.1/go.mod h1:uB7QIJBcclvYbwlUDkSCsGjAOMis3fP280LyhuDEf2I= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= @@ -625,82 +813,81 @@ gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.0.0/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools 
v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/api v0.0.0-20190918155943-95b840bb6a1f/go.mod h1:uWuOHnjmNrtQomJrvEBg0c0HRNyQ+8KTEERVsK0PW48= -k8s.io/api v0.17.2 h1:NF1UFXcKN7/OOv1uxdRz3qfra8AHsPav5M93hlV9+Dc= -k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4= -k8s.io/api v0.17.15 h1:ddnV/lTRb+ihe+eo0K8npm5Ypxi0TayGQHEcJ8TRT2c= -k8s.io/api v0.17.15/go.mod h1:mCepU58Bb3HpTKL9PsivAEq7oeWHTj9eK2Drst9gPKU= -k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY= -k8s.io/apiextensions-apiserver v0.17.15 h1:VXM8c4Y5xNBVXcIZTGSoxsy+YZ0QIfM58I9e7zY4PB8= -k8s.io/apiextensions-apiserver v0.17.15/go.mod h1:Qk6xT8Lbb88c9reVVWUrQJWwk24iutKH3xWd1iAXvSI= -k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4= -k8s.io/apimachinery v0.17.2 h1:hwDQQFbdRlpnnsR64Asdi55GyCaIP/3WQpMmbNBeWr4= -k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/apimachinery v0.17.15 h1:Ne6L9kEYiRl1aCKilhzeMFSGH/w14ztxi3vBjoz4osM= -k8s.io/apimachinery v0.17.15/go.mod h1:T54ZSpncArE25c5r2PbUPsLeTpkPWY/ivafigSX6+xk= -k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg= -k8s.io/apiserver v0.17.15/go.mod h1:D3U5E/WgntRX0vcPjGW9BInpLypADtb0YMEd257xz6w= -k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90/go.mod h1:J69/JveO6XESwVgG53q3Uz5OSfgsv4uxpScmmyYOOlk= -k8s.io/client-go v0.17.2 h1:ndIfkfXEGrNhLIgkr0+qhRguSD3u6DCmonepn1O6NYc= -k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI= -k8s.io/client-go v0.17.15 h1:fI7P1oJimO2au8eEOAzLKN6iTMA78mLUl+EU85AUz5s= -k8s.io/client-go v0.17.15/go.mod h1:KDGIHDbyT9c20v+rmEdnGHhpQLYkYgQolBCZowcy1VM= -k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269/go.mod h1:V5BD6M4CyaN5m+VthcclXWsVcT1Hu+glwa1bi3MIsyE= -k8s.io/code-generator v0.17.15/go.mod h1:iiHz51+oTx+Z9D0vB3CH3O4HDDPWrvZyUgUYaIE9h9M= -k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA= -k8s.io/component-base v0.17.15/go.mod h1:Hi4gj6KS14OpJUtz62ofz5GquCq9qSCHvhn/NLoe8QE= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU= -k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-openapi v0.0.0-20200410145947-bcb3869e6f29 h1:NeQXVJ2XFSkRoPzRo8AId01ZER+j8oV4SZADT4iBOXQ= -k8s.io/kube-openapi 
v0.0.0-20200410145947-bcb3869e6f29/go.mod h1:F+5wygcW0wmRTnM3cOgIqGivxkwSWIWT5YdsDbeAOaU= -k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20191218082557-f07c713de883 h1:TA8t8OLS8m3/0dtTckekO0pCQ7qMnD19fsZTQEgCSKQ= -k8s.io/utils v0.0.0-20191218082557-f07c713de883/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= -modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= -modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= -modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= -modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= -sigs.k8s.io/controller-runtime v0.4.0 h1:wATM6/m+3w8lj8FXNaO6Fs/rq/vqoOjO1Q116Z9NPsg= -sigs.k8s.io/controller-runtime v0.4.0/go.mod h1:ApC79lpY3PHW9xj/w9pj+lYkLgwAAUZwfXkME1Lajns= -sigs.k8s.io/controller-tools v0.2.4 h1:la1h46EzElvWefWLqfsXrnsO3lZjpkI0asTpX6h8PLA= -sigs.k8s.io/controller-tools v0.2.4/go.mod h1:m/ztfQNocGYBgTTCmFdnK94uVvgxeZeE3LtJvd/jIzA= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= -sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= -sigs.k8s.io/structured-merge-diff/v2 v2.0.1/go.mod h1:Wb7vfKAodbKgf6tn1Kl0VvGj7mRH6DGaRcixXEJXTsE= -sigs.k8s.io/testing_frameworks v0.1.2 h1:vK0+tvjF0BZ/RYFeZ1E6BYBwHJJXhjuZ3TdsEKH+UQM= -sigs.k8s.io/testing_frameworks v0.1.2/go.mod h1:ToQrwSC3s8Xf/lADdZp3Mktcql9CG0UAmdJG9th5i0w= -sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.19.2 h1:q+/krnHWKsL7OBZg/rxnycsl9569Pud76UJ77MvKXms= +k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= +k8s.io/api v0.20.4 h1:xZjKidCirayzX6tHONRQyTNDVIR55TYVqgATqo6ZULY= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/apiextensions-apiserver v0.19.2 h1:oG84UwiDsVDu7dlsGQs5GySmQHCzMhknfhFExJMz9tA= +k8s.io/apiextensions-apiserver v0.19.2/go.mod h1:EYNjpqIAvNZe+svXVx9j4uBaVhTB4C94HkY3w058qcg= +k8s.io/apimachinery v0.19.2 h1:5Gy9vQpAGTKHPVOh5c4plE274X8D/6cuEiTO2zve7tc= +k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apimachinery v0.20.4 h1:vhxQ0PPUUU2Ns1b9r4/UFp13UPs8cw2iOoTjnY9faa0= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA= +k8s.io/cli-runtime v0.20.4 h1:jVU13lBeebHLtarHeHkoIi3uRONFzccmP7hHLzEoQ4w= +k8s.io/cli-runtime v0.20.4/go.mod h1:dz38e1CM4uuIhy8PMFUZv7qsvIdoE3ByZYlmbHNCkt4= +k8s.io/client-go v0.19.2 h1:gMJuU3xJZs86L1oQ99R4EViAADUPMHHtS9jFshasHSc= +k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA= +k8s.io/client-go v0.20.4 h1:85crgh1IotNkLpKYKZHVNI1JT86nr/iDCvq2iWKsql4= +k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/code-generator v0.19.2/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= +k8s.io/code-generator v0.20.4/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= +k8s.io/component-base v0.19.2 h1:jW5Y9RcZTb79liEhW3XDVTW7MuvEGP0tQZnfSX6/+gs= 
+k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo= +k8s.io/component-base v0.20.4 h1:gdvPs4G11e99meQnW4zN+oYOjH8qkLz1sURrAzvKWqc= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-helpers v0.20.4/go.mod h1:S7jGg8zQp3kwvSzfuGtNaQAMVmvzomXDioTm5vABn9g= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kubectl v0.20.4 h1:Y1gUiigiZM+ulcrnWeqSHlTd0/7xWcQIXjuMnjtHyoo= +k8s.io/kubectl v0.20.4/go.mod h1:yCC5lUQyXRmmtwyxfaakryh9ezzp/bT0O14LeoFLbGo= +k8s.io/metrics v0.20.4/go.mod h1:DDXS+Ls+2NAxRcVhXKghRPa3csljyJRjDRjPe6EOg/g= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20200912215256-4140de9c8800 h1:9ZNvfPvVIEsp/T1ez4GQuzCcCTEQWhovSofhqR73A6g= +k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= +sigs.k8s.io/controller-runtime v0.7.0 h1:bU20IBBEPccWz5+zXpLnpVsgBYxqclaHu1pVDl/gEt8= +sigs.k8s.io/controller-runtime v0.7.0/go.mod h1:pJ3YBrJiAqMAZKi6UVGuE98ZrroV1p+pIhoHsMm9wdU= +sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0= +sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git 
a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt index b92001fb..74601199 100644 --- a/hack/boilerplate.go.txt +++ b/hack/boilerplate.go.txt @@ -1,4 +1,5 @@ /* +Copyright 2021 Intuit Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/main.go b/main.go index ba5e55d2..0dc2ea95 100644 --- a/main.go +++ b/main.go @@ -1,4 +1,5 @@ /* +Copyright 2021 Intuit Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,36 +18,43 @@ package main import ( "flag" - "fmt" "os" + "sync" "time" + "github.com/keikoproj/upgrade-manager/controllers/common" + + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. + _ "k8s.io/client-go/plugin/pkg/client/auth" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/autoscaling" "github.com/aws/aws-sdk-go/service/ec2" "github.com/go-logr/logr" "github.com/keikoproj/aws-sdk-go-cache/cache" + upgrademgrv1alpha1 "github.com/keikoproj/upgrade-manager/api/v1alpha1" + "github.com/keikoproj/upgrade-manager/controllers" + "github.com/keikoproj/upgrade-manager/controllers/common/log" + awsprovider "github.com/keikoproj/upgrade-manager/controllers/providers/aws" + kubeprovider "github.com/keikoproj/upgrade-manager/controllers/providers/kubernetes" uberzap "go.uber.org/zap" "go.uber.org/zap/zapcore" "k8s.io/apimachinery/pkg/runtime" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" - - upgrademgrv1alpha1 "github.com/keikoproj/upgrade-manager/api/v1alpha1" - "github.com/keikoproj/upgrade-manager/controllers" - "github.com/keikoproj/upgrade-manager/controllers/common" - "github.com/keikoproj/upgrade-manager/pkg/log" - // +kubebuilder:scaffold:imports + //+kubebuilder:scaffold:imports ) var ( scheme = runtime.NewScheme() - setupLog = ctrl.Log.WithName("setup") + setupLog = ctrl.Log.WithName("main") ) var ( @@ -58,54 +66,73 @@ var ( ) func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + + utilruntime.Must(upgrademgrv1alpha1.AddToScheme(scheme)) + //+kubebuilder:scaffold:scheme - err := upgrademgrv1alpha1.AddToScheme(scheme) - if err != nil { - panic(err) - } - // +kubebuilder:scaffold:scheme common.InitMetrics() } func main() { - var metricsAddr string - var enableLeaderElection bool - var namespace string - var maxParallel int - var maxAPIRetries int - var debugMode bool - var logMode string + + var ( + metricsAddr string + probeAddr string + enableLeaderElection bool + namespace string + maxParallel int + maxAPIRetries int + debugMode bool + logMode string + drainTimeout int + ignoreDrainFailures bool + ) + flag.BoolVar(&debugMode, "debug", false, "enable debug logging") flag.StringVar(&logMode, "log-format", "text", "Log mode: supported values: text, json.") flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.") + flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") 
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") flag.StringVar(&namespace, "namespace", "", "The namespace in which to watch objects") flag.IntVar(&maxParallel, "max-parallel", 10, "The max number of parallel rolling upgrades") flag.IntVar(&maxAPIRetries, "max-api-retries", 12, "The number of maximum retries for failed/rate limited AWS API calls") + flag.IntVar(&drainTimeout, "drain-timeout", 900, "when the drain command should timeout") + flag.BoolVar(&ignoreDrainFailures, "ignore-drain-failures", false, "proceed with instance termination despite drain failures.") + + opts := zap.Options{ + Development: true, + } + opts.BindFlags(flag.CommandLine) flag.Parse() ctrl.SetLogger(newLogger(logMode)) - mgo := ctrl.Options{ - Scheme: scheme, - MetricsBindAddress: metricsAddr, - LeaderElection: enableLeaderElection, + ctrlOpts := ctrl.Options{ + Scheme: scheme, + MetricsBindAddress: metricsAddr, + Port: 9443, + HealthProbeBindAddress: probeAddr, + LeaderElection: enableLeaderElection, + LeaderElectionID: "d6edb06e.keikoproj.io", } + if namespace != "" { - mgo.Namespace = namespace - setupLog.Info("Watch RollingUpgrade objects only in namespace " + namespace) + ctrlOpts.Namespace = namespace + setupLog.Info("starting watch in namespaced mode", "namespace", namespace) } else { - setupLog.Info("Watch RollingUpgrade objects in all namespaces") + setupLog.Info("starting watch in all namespaces") } - mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), mgo) + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrlOpts) if err != nil { setupLog.Error(err, "unable to start manager") os.Exit(1) } var region string - if region, err = deriveRegion(); err != nil { + if region, err = awsprovider.DeriveRegion(); err != nil { setupLog.Error(err, "unable to get region") os.Exit(1) } @@ -127,7 +154,8 @@ func main() { config = request.WithRetryer(config, log.NewRetryLogger(retryer)) sess, err := session.NewSession(config) if err != nil { - log.Fatalf("failed to AWS session, %v", err) + setupLog.Error(err, "failed to create an AWS session") + os.Exit(1) } cacheCfg := cache.NewConfig(CacheDefaultTTL, CacheMaxItems, CacheItemsToPrune) @@ -143,25 +171,61 @@ func main() { ) }) + kube, err := kubeprovider.GetKubernetesClient() + if err != nil { + setupLog.Error(err, "unable to create kubernetes client") + os.Exit(1) + } + + awsClient := &awsprovider.AmazonClientSet{ + Ec2Client: ec2.New(sess), + AsgClient: autoscaling.New(sess), + } + + kubeClient := &kubeprovider.KubernetesClientSet{ + Kubernetes: kube, + } + logger := ctrl.Log.WithName("controllers").WithName("RollingUpgrade") + reconciler := &controllers.RollingUpgradeReconciler{ - Client: mgr.GetClient(), - Log: logger, - ClusterState: controllers.NewClusterState(), - ASGClient: autoscaling.New(sess), - EC2Client: ec2.New(sess), - CacheConfig: cacheCfg, - ScriptRunner: controllers.NewScriptRunner(logger), + Client: mgr.GetClient(), + Logger: logger, + Scheme: mgr.GetScheme(), + CacheConfig: cacheCfg, + Auth: &controllers.RollingUpgradeAuthenticator{ + AmazonClientSet: awsClient, + KubernetesClientSet: kubeClient, + }, + EventWriter: kubeprovider.NewEventWriter(kubeClient, logger), + ScriptRunner: controllers.ScriptRunner{ + Logger: logger, + }, + DrainGroupMapper: &sync.Map{}, + DrainErrorMapper: &sync.Map{}, + ClusterNodesMap: &sync.Map{}, + ReconcileMap: &sync.Map{}, + DrainTimeout: drainTimeout, + IgnoreDrainFailures: 
ignoreDrainFailures, } reconciler.SetMaxParallel(maxParallel) - err = (reconciler).SetupWithManager(mgr) - if err != nil { + if err = (reconciler).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "RollingUpgrade") os.Exit(1) } - // +kubebuilder:scaffold:builder + + //+kubebuilder:scaffold:builder + + if err := mgr.AddHealthzCheck("health", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up health check") + os.Exit(1) + } + if err := mgr.AddReadyzCheck("check", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up ready check") + os.Exit(1) + } setupLog.Info("registering prometheus") @@ -170,6 +234,7 @@ func main() { setupLog.Error(err, "problem running manager") os.Exit(1) } + } func newLogger(logMode string) logr.Logger { @@ -187,22 +252,3 @@ func newLogger(logMode string) logr.Logger { logger := zap.New(opts...) return logger } - -func deriveRegion() (string, error) { - - if region := os.Getenv("AWS_REGION"); region != "" { - return region, nil - } - - var config aws.Config - sess := session.Must(session.NewSessionWithOptions(session.Options{ - SharedConfigState: session.SharedConfigEnable, - Config: config, - })) - c := ec2metadata.New(sess) - region, err := c.Region() - if err != nil { - return "", fmt.Errorf("cannot reach ec2metadata, if running locally export AWS_REGION: %w", err) - } - return region, nil -} diff --git a/test-bdd/Makefile b/test-bdd/Makefile new file mode 100644 index 00000000..7640c4e9 --- /dev/null +++ b/test-bdd/Makefile @@ -0,0 +1,12 @@ +test: fmt vet + go test ./... -coverprofile coverage.txt + go tool cover -html=./coverage.txt -o cover.html + + +# Run go fmt against code +fmt: + go fmt ./... + +# Run go vet against code +vet: + go vet ./... 
\ No newline at end of file diff --git a/test-bdd/go.mod b/test-bdd/go.mod new file mode 100644 index 00000000..e54397c5 --- /dev/null +++ b/test-bdd/go.mod @@ -0,0 +1,9 @@ +module github.com/keikoproj/upgrade-manager/test-bdd + +go 1.16 + +require ( + github.com/cucumber/godog v0.10.0 + github.com/keikoproj/kubedog v0.0.1 + github.com/sirupsen/logrus v1.6.0 +) \ No newline at end of file diff --git a/test-bdd/go.sum b/test-bdd/go.sum new file mode 100644 index 00000000..ecf143ed --- /dev/null +++ b/test-bdd/go.sum @@ -0,0 +1,236 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/aslakhellesoy/gox v1.0.100/go.mod h1:AJl542QsKKG96COVsv0N74HHzVQgDIQPceVUh1aeU2M= +github.com/aws/aws-sdk-go v1.33.8 h1:2/sOfb9oPHTRZ0lxinoaTPDcYwNa1H/SpKP4nVRBwmg= +github.com/aws/aws-sdk-go v1.33.8/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cucumber/gherkin-go/v11 v11.0.0 h1:cwVwN1Qn2VRSfHZNLEh5x00tPBmZcjATBWDpxsR5Xug= +github.com/cucumber/gherkin-go/v11 v11.0.0/go.mod h1:CX33k2XU2qog4e+TFjOValoq6mIUq0DmVccZs238R9w= +github.com/cucumber/godog v0.10.0 h1:W01u1+o8bRpgqJRLrclN3iAanU1jAao+TwOMoSV9g1Y= +github.com/cucumber/godog v0.10.0/go.mod h1:0Q+MOUg8Z9AhzLV+nNMbThQ2x1b17yYwGyahApTLjJA= +github.com/cucumber/messages-go/v10 v10.0.1/go.mod h1:kA5T38CBlBbYLU12TIrJ4fk4wSkVVOgyh7Enyy8WnSg= +github.com/cucumber/messages-go/v10 v10.0.3 h1:m/9SD/K/A15WP7i1aemIv7cwvUw+viS51Ui5HBw1cdE= +github.com/cucumber/messages-go/v10 v10.0.3/go.mod h1:9jMZ2Y8ZxjLY6TG2+x344nt5rXstVVDYSdS5ySfI1WY= +github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= 
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/uuid v1.1.1/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/hashicorp/go-immutable-radix v1.2.0 h1:l6UW37iCXwZkZoAbEYnptSHVE/cQ5bOTPYG5W3vf9+8= +github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-memdb v1.2.1 h1:wI9btDjYUOJJHTCnRlAG/TkRyD/ij7meJMrLK9X31Cc= +github.com/hashicorp/go-memdb v1.2.1/go.mod h1:OSvLJ662Jim8hMM+gWGyhktyWk2xPCnWMc7DWIqtkGA= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10 h1:6q5mVkdH/vYmqngx7kZQTjJ5HRsx+ImorDIEQ+beJgc= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/keikoproj/kubedog v0.0.1 h1:SKo5g78QvlXx+JniYGvgohsB/5dm6bdj6ccyyUrnLDs= +github.com/keikoproj/kubedog v0.0.1/go.mod h1:8aRJYCL//c+RvycK3qsAfuHqyS1EP7Pa4g8M+t1wO3M= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod 
h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= 
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod 
h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+k8s.io/api v0.17.2 h1:NF1UFXcKN7/OOv1uxdRz3qfra8AHsPav5M93hlV9+Dc=
+k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4=
+k8s.io/apimachinery v0.17.2 h1:hwDQQFbdRlpnnsR64Asdi55GyCaIP/3WQpMmbNBeWr4=
+k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
+k8s.io/client-go v0.17.2 h1:ndIfkfXEGrNhLIgkr0+qhRguSD3u6DCmonepn1O6NYc=
+k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI=
+k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
+k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
+k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
+k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo=
+k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
+sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
+sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
diff --git a/test-bdd/main_test.go b/test-bdd/main_test.go
index e436feba..58589c3e 100644
--- a/test-bdd/main_test.go
+++ b/test-bdd/main_test.go
@@ -1,4 +1,4 @@
-package main
+package test
 
 import (
 	"os"
@@ -7,11 +7,13 @@ import (
 
 	"github.com/cucumber/godog"
 	kdog "github.com/keikoproj/kubedog"
-	"github.com/keikoproj/upgrade-manager/pkg/log"
+	"github.com/sirupsen/logrus"
 )
 
 var t kdog.Test
 
+var log = logrus.New()
+
 func TestMain(m *testing.M) {
 	opts := godog.Options{
 		Format: "pretty",
diff --git a/test-bdd/templates/rolling-upgrade.yaml b/test-bdd/templates/rolling-upgrade.yaml
index 6ad892ec..f0d708ed 100644
--- a/test-bdd/templates/rolling-upgrade.yaml
+++ b/test-bdd/templates/rolling-upgrade.yaml
@@ -5,17 +5,15 @@ metadata:
   namespace: upgrade-manager-system
 spec:
   asgName: upgrademgr-eks-nightly-ASG
-  nodeIntervalSeconds: 90
-  postDrain: {}
-  postDrainDelaySeconds: 30
-  postDrainScript: |
-    echo "Hello, postDrain!"
-  postDrainWaitScript: |
-    echo "Hello, postDrainWait!"
-  postTerminate: {}
-  postTerminateScript: |
-    echo "Hello, postTerminate!"
-  preDrain: {}
+  nodeIntervalSeconds: 300
+  postDrainDelaySeconds: 90
+  postDrain:
+    script: echo "Hello, postDrain!"
+    postWaitScript: echo "Hello, postDrainWait!"
+  postTerminate:
+    script: echo "Hello, postTerminate!"
+  preDrain:
+    script: echo "Hello, preDrain!"
   strategy:
     mode: eager
     drainTimeout: 600
diff --git a/testbin/setup-envtest.sh b/testbin/setup-envtest.sh
new file mode 100644
index 00000000..783f930d
--- /dev/null
+++ b/testbin/setup-envtest.sh
@@ -0,0 +1,96 @@
+#!/usr/bin/env bash
+
+# Copyright 2020 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o pipefail
+
+# Turn colors in this script off by setting the NO_COLOR variable in your
+# environment to any value:
+#
+# $ NO_COLOR=1 test.sh
+NO_COLOR=${NO_COLOR:-""}
+if [ -z "$NO_COLOR" ]; then
+  header=$'\e[1;33m'
+  reset=$'\e[0m'
+else
+  header=''
+  reset=''
+fi
+
+function header_text {
+  echo "$header$*$reset"
+}
+
+function setup_envtest_env {
+  header_text "setting up env vars"
+
+  # Setup env vars
+  KUBEBUILDER_ASSETS=${KUBEBUILDER_ASSETS:-""}
+  if [[ -z "${KUBEBUILDER_ASSETS}" ]]; then
+    export KUBEBUILDER_ASSETS=$1/bin
+  fi
+}
+
+# fetch k8s API gen tools and make it available under envtest_root_dir/bin.
+#
+# Skip fetching and untaring the tools by setting the SKIP_FETCH_TOOLS variable
+# in your environment to any value:
+#
+# $ SKIP_FETCH_TOOLS=1 ./check-everything.sh
+#
+# If you skip fetching tools, this script will use the tools already on your
+# machine.
+function fetch_envtest_tools {
+  SKIP_FETCH_TOOLS=${SKIP_FETCH_TOOLS:-""}
+  if [ -n "$SKIP_FETCH_TOOLS" ]; then
+    return 0
+  fi
+
+  tmp_root=/tmp
+  envtest_root_dir=$tmp_root/envtest
+
+  k8s_version="${ENVTEST_K8S_VERSION:-1.19.2}"
+  goarch="$(go env GOARCH)"
+  goos="$(go env GOOS)"
+
+  if [[ "$goos" != "linux" && "$goos" != "darwin" ]]; then
+    echo "OS '$goos' not supported. Aborting." >&2
+    return 1
+  fi
+
+  local dest_dir="${1}"
+
+  # use the pre-existing version in the temporary folder if it matches our k8s version
+  if [[ -x "${dest_dir}/bin/kube-apiserver" ]]; then
+    version=$("${dest_dir}"/bin/kube-apiserver --version)
+    if [[ $version == *"${k8s_version}"* ]]; then
+      header_text "Using cached envtest tools from ${dest_dir}"
+      return 0
+    fi
+  fi
+
+  header_text "fetching envtest tools@${k8s_version} (into '${dest_dir}')"
+  envtest_tools_archive_name="kubebuilder-tools-$k8s_version-$goos-$goarch.tar.gz"
+  envtest_tools_download_url="https://storage.googleapis.com/kubebuilder-tools/$envtest_tools_archive_name"
+
+  envtest_tools_archive_path="$tmp_root/$envtest_tools_archive_name"
+  if [ ! -f $envtest_tools_archive_path ]; then
+    curl -sL ${envtest_tools_download_url} -o "$envtest_tools_archive_path"
+  fi
+
+  mkdir -p "${dest_dir}"
+  tar -C "${dest_dir}" --strip-components=1 -zvxf "$envtest_tools_archive_path"
+}
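Note (not part of the diff): the hunk above only adds the setup-envtest.sh helper. A minimal sketch of how it is typically consumed, assuming the standard kubebuilder-style `make test` wiring; the actual Makefile target and the ENVTEST_ASSETS_DIR name are assumptions, while fetch_envtest_tools, setup_envtest_env, SKIP_FETCH_TOOLS, and ENVTEST_K8S_VERSION come from the script itself:

    # Assumed wiring: ENVTEST_ASSETS_DIR is a local convention, not defined by the script.
    ENVTEST_ASSETS_DIR="$(pwd)/testbin"
    mkdir -p "${ENVTEST_ASSETS_DIR}"
    source "${ENVTEST_ASSETS_DIR}/setup-envtest.sh"
    fetch_envtest_tools "${ENVTEST_ASSETS_DIR}"   # fetches the kubebuilder-tools archive unless SKIP_FETCH_TOOLS is set
    setup_envtest_env "${ENVTEST_ASSETS_DIR}"     # exports KUBEBUILDER_ASSETS=<dir>/bin for the test run
    go test ./... -coverprofile cover.out

Setting ENVTEST_K8S_VERSION before the fetch selects a different control-plane version (the script defaults to 1.19.2).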