diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
new file mode 100644
index 0000000..09f2634
--- /dev/null
+++ b/.github/workflows/ci.yaml
@@ -0,0 +1,63 @@
+name: Tests
+on:
+  push:
+    branches:
+      - main
+  pull_request:
+
+jobs:
+  call-inclusive-naming-check:
+    name: Inclusive Naming
+    uses: canonical-web-and-design/Inclusive-naming/.github/workflows/woke.yaml@main
+    with:
+      fail-on-error: "true"
+
+  lint-unit:
+    name: Lint Unit
+    uses: charmed-kubernetes/workflows/.github/workflows/lint-unit.yaml@main
+    with:
+      python: "['3.8', '3.9', '3.10', '3.11']"
+    needs:
+      - call-inclusive-naming-check
+
+  charmcraft-build:
+    name: Build Charm
+    runs-on: ubuntu-22.04
+    steps:
+      - name: Check out code
+        uses: actions/checkout@v3
+      - name: Install and prepare LXD snap environment
+        run: |
+          sudo apt-get remove -qy lxd lxd-client || true
+          sudo snap list lxd || true
+          sudo snap install lxd --channel=latest/stable
+          sudo snap refresh lxd --channel=latest/stable
+          sudo lxd waitready
+          sudo lxd init --auto
+          sudo chmod a+wr /var/snap/lxd/common/lxd/unix.socket
+          sudo lxc network set lxdbr0 ipv6.address none
+          sudo usermod -a -G lxd $USER
+          sg lxd -c 'lxc version'
+      - name: Remove Docker
+        run: |
+          # https://github.com/canonical/lxd-cloud/blob/f20a64a8af42485440dcbfd370faf14137d2f349/test/includes/lxd.sh#L13-L23
+          sudo rm -rf /etc/docker
+          sudo apt-get purge moby-buildx moby-engine moby-cli moby-compose moby-containerd moby-runc -y
+          sudo iptables -P FORWARD ACCEPT
+      - name: Install Charmcraft
+        run: |
+          sudo snap install charmcraft --classic --channel=latest/stable
+      - name: Build Charm
+        run: |
+          sg lxd -c 'charmcraft pack -v'
+      - name: Upload charm artifact
+        uses: actions/upload-artifact@v3
+        with:
+          name: gcp-cloud-provider.charm
+          path: ./gcp-cloud-provider*.charm
+      - name: Upload debug artifacts
+        if: ${{ failure() }}
+        uses: actions/upload-artifact@v3
+        with:
+          name: charmcraft-logs
+          path: /home/runner/snap/charmcraft/common/cache/charmcraft/log/charmcraft-*.log
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..d779b38
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,9 @@
+venv/
+build/
+*.charm
+.tox/
+htmlcov/
+.coverage
+__pycache__/
+*.py[cod]
+juju-crashdump-*
\ No newline at end of file
diff --git a/.wokeignore b/.wokeignore
new file mode 100644
index 0000000..3b49196
--- /dev/null
+++ b/.wokeignore
@@ -0,0 +1 @@
+upstream/**/*.yaml
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..cd91c09
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,67 @@
+# Contributing
+
+## Overview
+
+This document explains the processes and practices recommended for contributing enhancements to
+this operator.
+
+- Generally, before developing enhancements to this charm, you should consider [opening a bug
+  ](https://bugs.launchpad.net/charm-gcp-cloud-provider) explaining your use case.
+- If you would like to chat with us about your use-cases or proposed implementation, you can reach
+  us at [Canonical Mattermost public channel](https://chat.charmhub.io/charmhub/channels/charm-dev)
+  or [Discourse](https://discourse.charmhub.io/).
+- Familiarising yourself with the [Charmed Operator Framework](https://juju.is/docs/sdk) library
+  will help you a lot when working on new features or bug fixes.
+- All enhancements require review before being merged. Code review typically examines
+  - code quality
+  - test coverage
+  - user experience for Juju administrators of this charm.
+- Please help us out in ensuring easy-to-review branches by rebasing your pull request branch onto
+  the `main` branch. This also avoids merge commits and creates a linear Git commit history.
+ +## Developing + +You can use the environments created by `tox` for development: + +```shell +tox --notest -e unit +source .tox/unit/bin/activate +``` + +### Testing + +```shell +tox -e format # update your code according to linting rules +tox -e lint # code style +tox -e unit # unit tests +tox -e integration # integration tests +tox # runs 'lint' and 'unit' environments +``` + +## Build charm + +Build the charm in this git repository using: + +```shell +charmcraft pack +``` + +### Deploy + +```bash +# Create a model +juju add-model dev +# Enable DEBUG logging +juju model-config logging-config="=INFO;unit=DEBUG" +# Deploy the charm +juju deploy gcp-integrator +juju deploy ./gcp-cloud-provider_ubuntu-*.charm +juju relate gcp-integrator gcp-cloud-provider +juju relate gcp-cloud-provider:external-cloud-provider kubernetes-control-plane +``` + +## Canonical Contributor Agreement + +Canonical welcomes contributions to the GCP Cloud Provider Operator. Please check +out our [contributor agreement](https://ubuntu.com/legal/contributors) if +you're interested in contributing to the solution. diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md new file mode 100644 index 0000000..bd74a78 --- /dev/null +++ b/README.md @@ -0,0 +1,64 @@ +# aws-cloud-provider + +## Description + +This subordinate charm manages the cloud controller-manager components for aws. + +## Requirements +* these polices are defined as [prerequisites](https://cloud-provider-aws.sigs.k8s.io/prerequisites/) +* the primary unit, the aws-integrator application must have access to create IAM Policies + +## Usage + +The charm requires aws credentials and connection information, which +can be provided the `aws-integration` relation to the [AWS Integrator charm](https://charmhub.io/aws-integrator). 
+
+## Deployment
+
+### Quickstart
+The AWS Cloud Provider subordinate charm can be deployed alongside Charmed Kubernetes using the overlay provided in the [Charmed Kubernetes bundle repository](https://github.com/charmed-kubernetes/bundle/blob/main/overlays/aws-overlay.yaml):
+
+```bash
+juju deploy charmed-kubernetes --overlay aws-overlay.yaml
+```
+
+### The full process
+
+```bash
+juju deploy charmed-kubernetes
+juju deploy aws-integrator --trust
+juju deploy aws-cloud-provider
+
+juju relate aws-cloud-provider:certificates easyrsa
+juju relate aws-cloud-provider:kube-control kubernetes-control-plane
+juju relate aws-cloud-provider:external-cloud-provider kubernetes-control-plane
+juju relate aws-cloud-provider:aws-integration aws-integrator
+
+## wait for the aws controller daemonset to be running
+# the cloud-controller will set the node's ProviderID
+kubectl describe nodes |egrep "Taints:|Name:|Provider"
+```
+
+### Storage
+* to access Native AWS storage, see the [AWS Storage charm](https://charmhub.io/aws-k8s-storage).
+
+### Details
+
+* Requires a `charmed-kubernetes` deployment on an aws cloud launched by juju with the `allow-privileged` flag enabled.
+* Deploy the `aws-integrator` charm into the model using `--trust` so juju provides aws credentials
+* Deploy the `aws-cloud-provider` charm in the model relating to the integrator and to charmed-kubernetes components
+* Once the model is active/idle, the cloud-provider charm will have successfully deployed the aws controller-manager
+  in the kube-system namespace
+* Taint the existing nodes so the controller will apply the correct provider id to those nodes.
+* Confirm the `ProviderID` is set on each node
+* For the controller to operate, the aws-integrator charm will apply the appropriate IAM policies and standardize the cluster-tag
+* the Kubernetes-Worker and Kubernetes-Control-Plane charms start their binaries with `--external-provider` rather than the
+  in-tree switch `--cloud-provider=aws` which has been removed starting in kubernetes 1.27
+
+
+## Contributing
+
+Please see the [Juju SDK docs](https://juju.is/docs/sdk) for guidelines
+on enhancements to this charm following best practice guidelines, and
+[CONTRIBUTING.md](https://github.com/canonical/aws-cloud-provider/blob/main/CONTRIBUTING.md)
+for developer guidance.
diff --git a/actions.yaml b/actions.yaml
new file mode 100644
index 0000000..5a57910
--- /dev/null
+++ b/actions.yaml
@@ -0,0 +1,45 @@
+list-versions:
+  description: List Cloud Provider and Storage Versions supported by this charm
+list-resources:
+  description: List Cloud Provider and Storage Resources of configured version
+  params:
+    controller:
+      type: string
+      default: ""
+      description: |
+        Filter list based on "provider" or "storage" manifests.
+    resources:
+      type: string
+      default: ""
+      description: |
+        Space separated list of kubernetes resource types to filter list result
+scrub-resources:
+  description: Remove deployments other than the current one
+  params:
+    controller:
+      type: string
+      default: ""
+      description: |
+        Filter list based on "provider" or "storage" manifests.
+    resources:
+      type: string
+      default: ""
+      description: |
+        Space separated list of kubernetes resource types to filter scrubbing
+sync-resources:
+  description: |
+    Add kubernetes resources which should be created by this charm which aren't
+    present within the cluster.
+  params:
+    controller:
+      type: string
+      default: ""
+      description: |
+        Filter list based on "storage" manifests.
+ resources: + type: string + default: "" + description: | + Space separated list of kubernetes resource types + to use a filter during the sync. This helps limit + which missing resources are applied. diff --git a/charmcraft.yaml b/charmcraft.yaml new file mode 100644 index 0000000..d227450 --- /dev/null +++ b/charmcraft.yaml @@ -0,0 +1,26 @@ +# Learn more about charmcraft.yaml configuration at: +# https://juju.is/docs/sdk/charmcraft-config + +# Architectures based on supported arch's in upstream +# https://github.com/kubernetes/cloud-provider-aws/blob/f33bf21384e7fba50052b1fb8774b76ffd268d50/Makefile +type: "charm" +bases: + - build-on: + - name: "ubuntu" + channel: "20.04" + architectures: ["amd64"] + run-on: + - name: "ubuntu" + channel: "20.04" + architectures: + - amd64 + - name: "ubuntu" + channel: "22.04" + architectures: + - amd64 +parts: + charm: + build-packages: [git] + charm-python-packages: [setuptools, pip] + prime: + - upstream/** diff --git a/config.yaml b/config.yaml new file mode 100644 index 0000000..e89e15d --- /dev/null +++ b/config.yaml @@ -0,0 +1,50 @@ +options: + # Global options + control-node-selector: + description: | + Specifies to which nodes this charm adds the gcp-ccm daemonsets + Declare node labels in key=value format, separated by spaces. + It's also valid to not have a value, this is interpretted as an empty string. + + Required if not related to kubernetes-control-plane:kube-control + + e.g. 
+ node-role.kubernetes.io/control-plane=custom-value + node-role.kubernetes.io/control-plane= + type: string + + provider-release: + type: string + description: | + Specify the version of cloud-provider as defined by the `releases` + directory of https://github.com/kubernetes/cloud-provider-gcp/ + + example) + juju config gcp-cloud-provider provider-release='v1.26.1' + + A list of supported versions is available through the action: + juju run-action gcp-cloud-provider/0 list-releases --wait + + To reset by to the latest supported by the charm use: + juju config gcp-cloud-provider --reset provider-release + + The current release deployed is available by viewing + juju status gcp-cloud-provider + + controller-extra-args: + type: string + default: "" + description: | + Space separated list of flags and key=value pairs that will be passed as arguments to + gcp-cloud-controller-manager. For example a value like this: + cluster_cidr=192.160.0.0/16 v=3 + will result in gcp-cloud-controller-manager being run with the following options: + --cluster_cidr=192.160.0.0/16 --v=3 + + enable-loadbalancers: + type: boolean + default: False + description: | + Enable the cloud-controller-manager to create public load-balancers. + Primarily this alters the ClusterRole RBAC permissions allowing the + cloud-controller-manager to update configmaps and services in all namespaces \ No newline at end of file diff --git a/icon.svg b/icon.svg new file mode 100644 index 0000000..fcc398b --- /dev/null +++ b/icon.svg @@ -0,0 +1,108 @@ + + + + + + image/svg+xml + + eclispe-che + + + + + + eclispe-che + Created with Sketch. + + + + + + + + + + + + + + + + + diff --git a/metadata.yaml b/metadata.yaml new file mode 100644 index 0000000..9196481 --- /dev/null +++ b/metadata.yaml @@ -0,0 +1,33 @@ +# Copyright 2022 Canonical, Ltd. +# See LICENSE file for licensing details. +name: gcp-cloud-provider +display-name: GCP Cloud Provider +summary: Runs the GCP Cloud Provider in the cluster. 
+source: https://github.com/charmed-kubernetes/gcp-cloud-provider +issues: https://bugs.launchpad.net/charm-gcp-cloud-provider +docs: https://discourse.charmhub.io/t/gcp-cloud-provider-docs-index/???? +description: >- + The gcp cloud provider provides the Kubernetes cluster access to + native resources from gcp such as load-balancers +subordinate: true +series: + - jammy + - focal +tags: + - kubernetes + - cloud-provider + - gcp +provides: + external-cloud-provider: + interface: external_cloud_provider + limit: 1 +requires: + gcp-integration: + interface: gcp-integration + scope: container + limit: 1 + kube-control: + interface: kube-control + limit: 1 + certificates: + interface: tls-certificates diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..1a02987 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,43 @@ +# Copyright 2023 Canonical Ltd. +# See LICENSE file for licensing details. + +# Testing tools configuration +[tool.coverage.run] +branch = true + +[tool.coverage.report] +show_missing = true + +[tool.pytest.ini_options] +minversion = "6.0" +log_cli_level = "INFO" +asyncio_mode = "auto" + +# Formatting tools configuration +[tool.black] +line-length = 99 +target-version = ["py310"] + +[tool.isort] +profile = "black" + +# Linting tools configuration +[tool.flake8] +max-line-length = 99 +max-doc-length = 99 +max-complexity = 10 +exclude = [".git", "__pycache__", ".tox", "build", "dist", "*.egg_info", "venv"] +select = ["E", "W", "F", "C", "N", "R", "D", "H"] +# Ignore W503, E501 because using black creates errors with this +# Ignore D107 Missing docstring in __init__ +ignore = ["W503", "E501", "D107"] +# D100, D101, D102, D103: Ignore missing docstrings in tests +per-file-ignores = ["tests/*:D100,D101,D102,D103,D104"] +docstring-convention = "google" +# Check for properly formatted copyright header in each file +copyright-check = "True" +copyright-author = "Canonical Ltd." 
+copyright-regexp = "Copyright\\s\\d{4}([-,]\\d{4})*\\s+%(author)s" + +[tool.mypy] +mypy_path = "src:lib" diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..d9a7819 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,8 @@ +backports.cached-property +ops>=1.3.0,<2.0.0 +lightkube>=0.10.1,<1.0.0 +pyyaml +pydantic==1.* +ops.manifest>=1.1.0,<2.0.0 +git+https://github.com/charmed-kubernetes/interface-kube-control.git@6dd289d1c795fdeda1bed17873b8d6562227c829#subdirectory=ops +git+https://github.com/charmed-kubernetes/interface-tls-certificates.git@339efe3823b9728d16cdf5bcd1fc3b5de4e68923#subdirectory=ops \ No newline at end of file diff --git a/src/charm.py b/src/charm.py new file mode 100755 index 0000000..e85c56a --- /dev/null +++ b/src/charm.py @@ -0,0 +1,221 @@ +#!/usr/bin/env python3 +# Copyright 2022 Canonical Ltd. +# See LICENSE file for licensing details. +"""Dispatch logic for the GCP Cloud Provider charm.""" + +import logging +from pathlib import Path + +from ops.charm import CharmBase +from ops.framework import StoredState +from ops.interface_kube_control import KubeControlRequirer +from ops.interface_tls_certificates import CertificatesRequires +from ops.main import main +from ops.manifests import Collector, ManifestClientError +from ops.model import ActiveStatus, BlockedStatus, MaintenanceStatus, WaitingStatus + +from config import CharmConfig +from provider_manifests import GCPProviderManifests +from requires_gcp_integration import GCPIntegratorRequires + +log = logging.getLogger(__name__) + + +class GcpCloudProviderCharm(CharmBase): + """Dispatch logic for the gcp-cloud-provider charm.""" + + CA_CERT_PATH = Path("/srv/kubernetes/ca.crt") + + stored = StoredState() + + def __init__(self, *args): + super().__init__(*args) + + # Relation Validator and datastore + self.integrator = GCPIntegratorRequires(self) + self.kube_control = KubeControlRequirer(self) + self.certificates = CertificatesRequires(self) + # Config Validator and 
datastore + self.charm_config = CharmConfig(self) + + self.CA_CERT_PATH.parent.mkdir(exist_ok=True) + self.stored.set_default( + config_hash=None, # hashed value of the provider config once valid + deployed=False, # True if the config has been applied after new hash + ) + self.collector = Collector( + GCPProviderManifests( + self, + self.charm_config, + self.integrator, + self.kube_control, + ), + ) + + self.framework.observe(self.on.kube_control_relation_created, self._kube_control) + self.framework.observe(self.on.kube_control_relation_joined, self._kube_control) + self.framework.observe(self.on.kube_control_relation_changed, self._merge_config) + self.framework.observe(self.on.kube_control_relation_broken, self._merge_config) + + self.framework.observe(self.on.certificates_relation_created, self._merge_config) + self.framework.observe(self.on.certificates_relation_changed, self._merge_config) + self.framework.observe(self.on.certificates_relation_broken, self._merge_config) + + self.framework.observe(self.on.external_cloud_provider_relation_joined, self._merge_config) + self.framework.observe(self.on.external_cloud_provider_relation_broken, self._merge_config) + + self.framework.observe(self.on.gcp_integration_relation_joined, self._request_gcp_features) + self.framework.observe(self.on.gcp_integration_relation_changed, self._merge_config) + self.framework.observe(self.on.gcp_integration_relation_broken, self._merge_config) + + self.framework.observe(self.on.list_versions_action, self._list_versions) + self.framework.observe(self.on.list_resources_action, self._list_resources) + self.framework.observe(self.on.scrub_resources_action, self._scrub_resources) + self.framework.observe(self.on.sync_resources_action, self._sync_resources) + self.framework.observe(self.on.update_status, self._update_status) + + self.framework.observe(self.on.install, self._install_or_upgrade) + self.framework.observe(self.on.upgrade_charm, self._install_or_upgrade) + 
self.framework.observe(self.on.config_changed, self._merge_config) + self.framework.observe(self.on.stop, self._cleanup) + + def _list_versions(self, event): + self.collector.list_versions(event) + + def _list_resources(self, event): + manifests = event.params.get("controller", "") + resources = event.params.get("resources", "") + return self.collector.list_resources(event, manifests, resources) + + def _scrub_resources(self, event): + manifests = event.params.get("controller", "") + resources = event.params.get("resources", "") + return self.collector.scrub_resources(event, manifests, resources) + + def _sync_resources(self, event): + manifests = event.params.get("controller", "") + resources = event.params.get("resources", "") + try: + self.collector.apply_missing_resources(event, manifests, resources) + except ManifestClientError: + msg = "Failed to apply missing resources. API Server unavailable." + event.set_results({"result": msg}) + else: + self.stored.deployed = True + + def _request_gcp_features(self, event): + self.integrator.enable_instance_inspection() + self._merge_config(event=event) + + def _update_status(self, _): + if not self.stored.deployed: + return + + unready = self.collector.unready + if unready: + self.unit.status = WaitingStatus(", ".join(unready)) + else: + self.unit.status = ActiveStatus("Ready") + self.unit.set_workload_version(self.collector.short_version) + self.app.status = ActiveStatus(self.collector.long_version) + + def _kube_control(self, event): + self.kube_control.set_auth_request(self.unit.name) + return self._merge_config(event) + + def _check_kube_control(self, event): + self.unit.status = MaintenanceStatus("Evaluating kubernetes authentication.") + evaluation = self.kube_control.evaluate_relation(event) + if evaluation: + if "Waiting" in evaluation: + self.unit.status = WaitingStatus(evaluation) + else: + self.unit.status = BlockedStatus(evaluation) + return False + if not 
self.kube_control.get_auth_credentials(self.unit.name): + self.unit.status = WaitingStatus("Waiting for kube-control: unit credentials") + return False + self.kube_control.create_kubeconfig( + self.CA_CERT_PATH, "/root/.kube/config", "root", self.unit.name + ) + self.kube_control.create_kubeconfig( + self.CA_CERT_PATH, "/home/ubuntu/.kube/config", "ubuntu", self.unit.name + ) + return True + + def _check_certificates(self, event): + self.unit.status = MaintenanceStatus("Evaluating certificates.") + evaluation = self.certificates.evaluate_relation(event) + if evaluation: + if "Waiting" in evaluation: + self.unit.status = WaitingStatus(evaluation) + else: + self.unit.status = BlockedStatus(evaluation) + return False + self.CA_CERT_PATH.write_text(self.certificates.ca) + return True + + def _check_config(self): + self.unit.status = MaintenanceStatus("Evaluating charm config.") + evaluation = self.charm_config.evaluate() + if evaluation: + self.unit.status = BlockedStatus(evaluation) + return False + return True + + def _merge_config(self, event): + if not self._check_certificates(event): + return + + if not self._check_kube_control(event): + return + + if not self._check_config(): + return + + self.unit.status = MaintenanceStatus("Evaluating Manifests") + new_hash = 0 + for controller in self.collector.manifests.values(): + evaluation = controller.evaluate() + if evaluation: + self.unit.status = BlockedStatus(evaluation) + return + new_hash += controller.hash() + + self.stored.deployed = False + if self._install_or_upgrade(event, config_hash=new_hash): + self.stored.config_hash = new_hash + self.stored.deployed = True + + def _install_or_upgrade(self, event, config_hash=None): + if self.stored.config_hash == config_hash: + log.info("Skipping until the config is evaluated.") + return True + + self.unit.status = MaintenanceStatus("Deploying GCP Cloud Provider") + self.unit.set_workload_version("") + for controller in self.collector.manifests.values(): + try: + 
controller.apply_manifests() + except ManifestClientError as e: + self.unit.status = WaitingStatus("Waiting for kube-apiserver") + log.warn(f"Encountered retryable installation error: {e}") + event.defer() + return False + return True + + def _cleanup(self, event): + if self.stored.config_hash: + self.unit.status = MaintenanceStatus("Cleaning up GCP Cloud Provider") + for controller in self.collector.manifests.values(): + try: + controller.delete_manifests(ignore_unauthorized=True) + except ManifestClientError: + self.unit.status = WaitingStatus("Waiting for kube-apiserver") + event.defer() + return + self.unit.status = MaintenanceStatus("Shutting down") + + +if __name__ == "__main__": + main(GcpCloudProviderCharm) diff --git a/src/config.py b/src/config.py new file mode 100644 index 0000000..a1f8716 --- /dev/null +++ b/src/config.py @@ -0,0 +1,74 @@ +# Copyright 2022 Canonical Ltd. +# See LICENSE file for licensing details. +"""Config Management for the gcp-cloud-provider charm.""" + +import logging +from typing import Mapping, Optional + +log = logging.getLogger(__name__) + + +class CharmConfig: + """Representation of the charm configuration.""" + + def __init__(self, charm): + """Creates a CharmConfig object from the configuration data.""" + self.charm = charm + + @property + def control_node_selector(self) -> Optional[Mapping[str, str]]: + """Parse charm config for node selector into a dict.""" + value = self.charm.config.get("control-node-selector") + if value: + object_value = {} + for label in value.split(" "): + key, value = label.split("=") + object_value[key] = value + return object_value + return None + + @property + def controller_extra_args(self) -> Mapping[str, str]: + """Parse charm config for controller extra args into a dict.""" + elements = self.charm.config.get("controller-extra-args", "").split() + args = {} + for element in elements: + if "=" in element: + key, _, value = element.partition("=") + args[key] = value + else: + args[element] = 
"true" + return args + + @property + def safe_control_node_selector(self) -> Optional[Mapping[str, str]]: + """Parse charm config for node selector into a dict, return None on failure.""" + try: + return self.control_node_selector + except ValueError: + return None + + def evaluate(self) -> Optional[str]: + """Determine if configuration is valid.""" + try: + self.control_node_selector + except ValueError: + return "Config control-node-selector is invalid." + return None + + @property + def available_data(self): + """Parse valid charm config into a dict, drop keys if unset.""" + data = {} + for key, value in self.charm.config.items(): + if key == "control-node-selector": + value = self.safe_control_node_selector + if key == "controller-extra-args": + value = self.controller_extra_args + data[key] = value + + for key, value in dict(**data).items(): + if value == "" or value is None: + del data[key] + + return data diff --git a/src/provider_manifests.py b/src/provider_manifests.py new file mode 100644 index 0000000..51d6bc6 --- /dev/null +++ b/src/provider_manifests.py @@ -0,0 +1,215 @@ +# Copyright 2022 Canonical Ltd. +# See LICENSE file for licensing details. 
+"""Implementation of gcp specific details of the kubernetes manifests.""" +import logging +import pickle +from hashlib import md5 +from typing import Dict, Optional + +from lightkube.codecs import AnyResource, from_dict +from lightkube.models.core_v1 import ( + ConfigMapVolumeSource, + EnvVar, + SecretVolumeSource, + Toleration, + Volume, + VolumeMount, +) +from lightkube.models.rbac_v1 import PolicyRule +from ops.manifests import Addition, ManifestLabel, Manifests, Patch + +log = logging.getLogger(__file__) +NAMESPACE = "kube-system" +SECRET_NAME = "gcp-cloud-secret" +SECRET_DATA = "gcp-creds" +GCP_CONFIG_NAME = "cloudconfig" +GCP_CONFIG_DATA = "cloud.config" + + +class CreateSecret(Addition): + """Create secret for the deployment.""" + + def __call__(self) -> Optional[AnyResource]: + """Craft the secrets object for the deployment.""" + secret_config = {SECRET_DATA: self.manifests.config.get(SECRET_DATA)} + if any(s is None for s in secret_config.values()): + log.error("secret data item is None") + return None + + log.info("Encoding secret data for cloud-controller.") + return from_dict( + dict( + apiVersion="v1", + kind="Secret", + type="Opaque", + metadata=dict(name=SECRET_NAME, namespace=NAMESPACE), + data=secret_config, + ) + ) + + +class CreateCloudConfig(Addition): + """Create cloud-config for the deployment.""" + + def __call__(self) -> Optional[AnyResource]: + """Craft the ConfigMap object for the deployment.""" + log.info("Encode cloud-config for cloud-controller.") + return from_dict( + dict( + apiVersion="v1", + kind="ConfigMap", + metadata=dict(name=GCP_CONFIG_NAME, namespace=NAMESPACE), + data={GCP_CONFIG_DATA: "[Global]\ntoken-url = nil\nmultizone = true"}, + ) + ) + + +class UpdateControllerDaemonSet(Patch): + """Update the Controller DaemonSet object to target juju control plane.""" + + def __call__(self, obj): + """Update the DaemonSet object in the deployment.""" + if not (obj.kind == "DaemonSet" and obj.metadata.name == 
"cloud-controller-manager"): + return + node_selector = self.manifests.config.get("control-node-selector") + if not isinstance(node_selector, dict): + log.error( + f"provider control-node-selector was an unexpected type: {type(node_selector)}" + ) + return + obj.spec.template.spec.nodeSelector = node_selector + node_selector_text = " ".join('{0}: "{1}"'.format(*t) for t in node_selector.items()) + log.info(f"Applying provider Control Node Selector as {node_selector_text}") + + current_keys = {toleration.key for toleration in obj.spec.template.spec.tolerations} + missing_tolerations = [ + Toleration( + key=taint.key, + value=taint.value, + effect=taint.effect, + ) + for taint in self.manifests.config.get("control-node-taints", []) + if taint.key not in current_keys + ] + obj.spec.template.spec.tolerations += missing_tolerations + log.info("Adding provider tolerations from control-plane") + + args = [ + ("cloud-provider", "gce"), + ("cloud-config", f"/etc/kubernetes/config/{GCP_CONFIG_DATA}"), + ("controllers", "*"), + ("controllers", "-nodeipam"), + ("v", 4), + ("configure-cloud-routes", "false"), + ("allocate-node-cidrs", "false"), + ("cluster-name", self.manifests.config.get("cluster-name")), + ] + args += list(self.manifests.config.get("controller-extra-args").items()) + containers = obj.spec.template.spec.containers + containers[0].args = [f"--{name}={value}" for name, value in args] + containers[0].command = ["/usr/local/bin/cloud-controller-manager"] + containers[0].env = [ + EnvVar("GOOGLE_APPLICATION_CREDENTIALS", f"/etc/kubernetes/creds/{SECRET_DATA}") + ] + containers[0].volumeMounts = [ + VolumeMount("/etc/kubernetes/config", GCP_CONFIG_NAME, readOnly=True), + VolumeMount("/etc/kubernetes/creds", SECRET_NAME, readOnly=True), + ] + log.info("Adjusting container arguments") + + obj.spec.template.spec.volumes = [ + Volume(name=GCP_CONFIG_NAME, configMap=ConfigMapVolumeSource(name=GCP_CONFIG_NAME)), + Volume(name=SECRET_NAME, 
secret=SecretVolumeSource(secretName=SECRET_NAME)), + ] + log.info("Adjusting container cloud-config secret") + + +class LoadBalancerSupport(Patch): + """Update cluster role bindings to support creating Public LoadBalancers.""" + + def __call__(self, obj): + """Update the ClusterRole resource.""" + if not ( + obj.kind == "ClusterRole" and obj.metadata.name == "system:cloud-controller-manager" + ): + return + + if not self.manifests.config.get("enable-loadbalancers"): + log.info("Skip Loadbalancer RBAC Rule adjustments.") + return + + obj.rules += [ + PolicyRule( + apiGroups=[""], verbs=["list", "patch", "update", "watch"], resources=["services"] + ), + PolicyRule( + apiGroups=[""], + verbs=["list", "patch", "update", "watch"], + resources=["services/status"], + ), + PolicyRule( + apiGroups=[""], + verbs=["create", "list", "patch", "update", "watch"], + resources=["configmaps"], + ), + ] + log.info("Adjust Loadbalancer RBAC Rules.") + + +class GCPProviderManifests(Manifests): + """Deployment Specific details for cloud-provider-gcp.""" + + def __init__(self, charm, charm_config, integrator, kube_control): + manipulations = [ + CreateCloudConfig(self), + CreateSecret(self), + ManifestLabel(self), + UpdateControllerDaemonSet(self), + LoadBalancerSupport(self), + ] + super().__init__( + "cloud-provider-gcp", charm.model, "upstream/cloud_provider", manipulations + ) + self.charm_config = charm_config + self.integrator = integrator + self.kube_control = kube_control + + @property + def config(self) -> Dict: + """Returns current config available from charm config and joined relations.""" + config = {} + if self.integrator.is_ready: + config[SECRET_DATA] = self.integrator.credentials.decode() + if self.kube_control.is_ready: + config["image-registry"] = self.kube_control.get_registry_location() + config["control-node-taints"] = self.kube_control.get_controller_taints() or [ + Toleration("NoSchedule", "node-role.kubernetes.io/control-plane"), + Toleration("NoSchedule", 
"node.cloudprovider.kubernetes.io/uninitialized", "true"), + ] # by default + config["control-node-selector"] = { + label.key: label.value for label in self.kube_control.get_controller_labels() + } or {"juju-application": self.kube_control.relation.app.name} + config["cluster-name"] = self.kube_control.get_cluster_tag() + + config.update(**self.charm_config.available_data) + + for key, value in dict(**config).items(): + if value == "" or value is None: + del config[key] + + config["release"] = config.pop("provider-release", None) + + return config + + def hash(self) -> int: + """Calculate a hash of the current configuration.""" + return int(md5(pickle.dumps(self.config)).hexdigest(), 16) + + def evaluate(self) -> Optional[str]: + """Determine if manifest_config can be applied to manifests.""" + props = ["control-node-selector", "cluster-name", SECRET_DATA] + for prop in props: + value = self.config.get(prop) + if not value: + return f"Provider manifests waiting for definition of {prop}" + return None diff --git a/src/requires_gcp_integration.py b/src/requires_gcp_integration.py new file mode 100644 index 0000000..256ead3 --- /dev/null +++ b/src/requires_gcp_integration.py @@ -0,0 +1,161 @@ +# Copyright 2022 Canonical Ltd. +# See LICENSE file for licensing details. +"""Implementation of tls-certificates interface. + +This only implements the requires side, currently, since the providers +is still using the Reactive Charm framework self. 
+""" +import base64 +import json +import logging +import os +import random +import string +from typing import Mapping, Optional +from urllib.parse import urljoin +from urllib.request import Request, urlopen + +from backports.cached_property import cached_property +from ops.charm import RelationBrokenEvent +from ops.framework import Object, StoredState +from pydantic import BaseModel, Json, SecretStr, ValidationError, validator + +log = logging.getLogger(__name__) + + +# block size to read data from GCP metadata service +# (realistically, just needs to be bigger than ~20 chars) +READ_BLOCK_SIZE = 2048 + + +class Data(BaseModel): + """Databag for information shared over the relation.""" + + completed: Json[Mapping[str, str]] + credentials: Json[SecretStr] + + @validator("credentials") + def must_be_json(cls, s: Json[SecretStr]): + """Validate cloud-sa is base64 encoded json.""" + secret_val = s.get_secret_value() + try: + json.loads(secret_val) + except json.JSONDecodeError: + raise ValueError("Couldn't find json data") + return s + + +class GCPIntegratorRequires(Object): + """Requires side of gcp-integration relation.""" + + stored = StoredState() + + # https://cloud.google.com/compute/docs/storing-retrieving-metadata + _metadata_url = "http://metadata.google.internal/computeMetadata/v1/" + _instance_url = urljoin(_metadata_url, "instance/name") + _zone_url = urljoin(_metadata_url, "instance/zone") + _metadata_headers = {"Metadata-Flavor": "Google"} + + def __init__(self, charm, endpoint="gcp-integration"): + super().__init__(charm, f"relation-{endpoint}") + self.endpoint = endpoint + events = charm.on[endpoint] + self._unit_name = self.model.unit.name.replace("/", "_") + self.framework.observe(events.relation_joined, self._joined) + self.stored.set_default( + instance=None, # stores the instance name + zone=None, # stores the zone of this instance + ) + + def _joined(self, event): + to_publish = self.relation.data[self.model.unit] + to_publish["charm"] = 
self.model.app.name + to_publish["instance"] = self.instance + to_publish["zone"] = self.zone + to_publish["model-uuid"] = os.environ["JUJU_MODEL_UUID"] + + @cached_property + def relation(self): + """The relation to the integrator, or None.""" + return self.model.get_relation(self.endpoint) + + @cached_property + def _raw_data(self): + if self.relation and self.relation.units: + return self.relation.data[list(self.relation.units)[0]] + return None + + @cached_property + def _data(self) -> Optional[Data]: + raw = self._raw_data + return Data(**raw) if raw else None + + def evaluate_relation(self, event) -> Optional[str]: + """Determine if relation is ready.""" + no_relation = not self.relation or ( + isinstance(event, RelationBrokenEvent) and event.relation is self.relation + ) + if not self.is_ready: + if no_relation: + return f"Missing required {self.endpoint}" + return f"Waiting for {self.endpoint}" + return None + + @property + def instance(self): + """This unit's instance name.""" + if self.stored.instance is None: + req = Request(self._instance_url, headers=self._metadata_headers) + with urlopen(req) as fd: + instance = fd.read(READ_BLOCK_SIZE).decode("utf8").strip() + self.stored.instance = instance + return self.stored.instance + + @property + def zone(self): + """The zone this unit is in.""" + if self.stored.zone is None: + req = Request(self._zone_url, headers=self._metadata_headers) + with urlopen(req) as fd: + zone = fd.read(READ_BLOCK_SIZE).decode("utf8").strip() + zone = zone.split("/")[-1] + self.stored.zone = zone + return self.stored.zone + + @property + def is_ready(self): + """Whether the request for this instance has been completed.""" + try: + self._data + except ValidationError as ve: + log.error(f"{self.endpoint} relation data not yet valid. 
({ve}") + return False + if self._data is None: + log.error(f"{self.endpoint} relation data not yet available.") + return False + last_completed = self._data.completed.get(self.instance) + last_requested = self.relation.data[self.model.unit].get("requested") + log.info(f"{self.endpoint} completion {last_completed}?={last_requested}.") + return last_requested and last_completed == last_requested + + def _request(self, keyvals): + alphabet = string.ascii_letters + string.digits + nonce = "".join(random.choice(alphabet) for _ in range(8)) + to_publish = self.relation.data[self.model.unit] + to_publish.update({k: json.dumps(v) for k, v in keyvals.items()}) + to_publish["requested"] = nonce + + @property + def credentials(self) -> Optional[bytes]: + """Return credentials from integrator charm.""" + if not self.is_ready or not self._data: + return None + return base64.b64encode(self._data.credentials.get_secret_value().encode()) + + def enable_instance_inspection(self): + """Request the ability to manage block storage.""" + self._request({"enable-instance-inspection": True}) + + def enable_block_storage_management(self): + """Request the ability to manage block storage.""" + self._request({"enable-block-storage-management": True}) diff --git a/tests/data/certificates_data.yaml b/tests/data/certificates_data.yaml new file mode 100644 index 0000000..5d63c7a --- /dev/null +++ b/tests/data/certificates_data.yaml @@ -0,0 +1,112 @@ +ca: |- + -----BEGIN CERTIFICATE----- + MIIDUTCCAjmgAwIBAgIUPrhr50UDT48ExXBYGWYeE4vsDcwwDQYJKoZIhvcNAQEL + BQAwGDEWMBQGA1UEAwwNMTAuMTMwLjcyLjE5MDAeFw0yMjA1MTcxOTQyMTBaFw0z + MjA1MTQxOTQyMTBaMBgxFjAUBgNVBAMMDTEwLjEzMC43Mi4xOTAwggEiMA0GCSqG + SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCmWWxe/vA/ZabW9lwk+6McnLT2WLZbamGc + EYIeMm9Kv9EIUtRpIHFr+q5ssn2FI7TLw2QZsMA8Yi+oPXMob9nPn2L9/2IVqHC/ + VqYWryan/3eIThpGQ23RvqxszJ/1p1rZVBjA+kZny0kLZpDdIYvFg3CCU+1lKDSV + pqFDfr3/gmLZljykyt4LPbgH9skC/+r46f4jlgJkyWvEU86ckIX5P59BK8/rbf+n + 
GDH1Yxa2q9LkDBXpNxhofyYEzxQaG0dWKjoJNEB3BDyMojXTLzeJg5s0GNIVWiCS + AbA5zHFhvq7FsAQ2X8oX1uGvgDqW6zeFoyZRmz3EFUcD2OG9oAylAgMBAAGjgZIw + gY8wHQYDVR0OBBYEFDLq9/p4R07O84D4EY5mXKdSEKtuMFMGA1UdIwRMMEqAFDLq + 9/p4R07O84D4EY5mXKdSEKtuoRykGjAYMRYwFAYDVQQDDA0xMC4xMzAuNzIuMTkw + ghQ+uGvnRQNPjwTFcFgZZh4Ti+wNzDAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIB + BjANBgkqhkiG9w0BAQsFAAOCAQEAPq0iPuWK/MKBgcshRL79TgjnOBvQ5D20Is8Z + YP29W6iCpUz8Z9ERw2GmbkvQJgTHvfcLb5O3ajNv2ih5Wrfd9mho1qzr5ji+mVNk + HtWAx4C2UtnUKyoSsNPDy5SBrG0JPIXbsWYpVBsDxcZpI/xctBLOlG5oks3E32HR + D2vVK2ZLfAwiBGX7EQSWGP8ued++GmrEreOyqTM3x0VOyUWscbawy1djRBaWYjJX + m/J6HZ/xyZJoPB3wxYX0PQ8IlRqpCBhN/iay/bPDyinoEZroQPeAPCiRVwVzUn73 + ogB5SM6ViHdtTHvUuJhDGHSSaDhtb++gE2JKLsbqM1ZFQ+KtLg== + -----END CERTIFICATE----- +client.cert: |- + Certificate: + Data: + Version: 3 (0x2) + Serial Number: 1 (0x1) + Signature Algorithm: sha256WithRSAEncryption + Issuer: CN=10.130.72.190 + Validity + Not Before: May 17 19:42:21 2022 GMT + Not After : May 14 19:42:21 2032 GMT + Subject: CN=client + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + RSA Public-Key: (2048 bit) + Modulus: + 00:ba:b4:8e:c0:8a:08:9a:b4:12:94:c1:4c:78:de: + 5f:9a:23:93:c8:62:92:fc:f4:26:d2:81:ac:88:22: + ae:24:f1:99:46:d3:e8:69:71:f3:1c:f7:9e:f7:a8: + 00:df:eb:64:5e:67:00:a2:f3:b4:c2:33:c2:83:75: + 99:d4:ef:db:0b:b7:ad:f9:42:cd:76:37:8a:5f:a8: + 0e:c8:d5:43:5b:ae:30:e5:0f:20:58:19:c6:5b:a5: + 9d:a4:4a:08:7c:86:77:c2:4f:2c:5c:4e:b5:ba:3c: + 2f:35:f4:6d:90:e0:e3:28:6d:71:ce:e8:af:f3:83: + 20:cb:32:f9:67:de:d1:66:3e:06:5c:a6:65:a7:0e: + 83:c7:49:47:65:eb:6d:ca:d8:cf:fc:4c:b4:b8:80: + 3f:63:21:60:b3:0e:88:b4:d7:6e:93:91:d0:23:70: + 96:3e:fe:2c:f6:4f:9b:b9:0f:40:9f:2a:15:b7:41: + 87:4c:03:ff:09:55:70:5b:ca:3d:f2:81:fb:ff:37: + 78:2c:20:55:7a:37:f4:f0:1b:0a:b5:de:94:fd:de: + c1:4c:a0:3b:16:24:2c:0d:77:90:66:96:b6:3b:ad: + 73:5f:0c:41:40:68:97:39:bb:ab:5b:64:13:80:66: + 72:31:0a:05:62:13:b7:89:ed:d9:01:8d:2d:01:71: + c3:73 + Exponent: 65537 (0x10001) + X509v3 extensions: + 
X509v3 Basic Constraints: + CA:FALSE + X509v3 Subject Key Identifier: + DB:95:07:14:C6:30:44:21:C8:4C:8C:0B:75:A8:EB:E8:04:B4:C5:28 + X509v3 Authority Key Identifier: + keyid:32:EA:F7:FA:78:47:4E:CE:F3:80:F8:11:8E:66:5C:A7:52:10:AB:6E + DirName:/CN=10.130.72.190 + serial:3E:B8:6B:E7:45:03:4F:8F:04:C5:70:58:19:66:1E:13:8B:EC:0D:CC + + X509v3 Extended Key Usage: + TLS Web Client Authentication + X509v3 Key Usage: + Digital Signature + Signature Algorithm: sha256WithRSAEncryption + 1f:52:24:1d:d1:9c:a6:ef:f2:99:76:b7:88:df:13:2f:78:a4: + 0d:97:89:ce:a4:f8:b4:92:e8:74:7f:2f:eb:9c:87:28:ba:d5: + 46:d5:62:f1:89:fa:06:f0:2f:58:e4:f0:a9:38:05:c2:92:5c: + 41:2d:5f:f2:ee:e1:00:c0:32:48:1d:0f:67:48:30:f0:85:97: + 75:73:99:dd:ce:e9:e1:5d:a0:db:08:3c:98:48:ff:94:7d:cd: + b3:31:42:e0:ff:62:f4:ca:86:b2:c4:cd:aa:e6:0c:2c:1b:5b: + 16:a6:b3:fd:82:63:11:92:d8:50:e0:53:be:ce:e3:6b:a3:40: + c3:49:b0:75:ce:25:1a:c7:00:a6:80:23:a3:89:44:bb:79:40: + d1:a0:45:7e:43:a4:f1:e6:58:57:f2:8d:5d:2b:a8:6b:f9:ca: + 63:89:2f:37:f1:2d:cb:77:f5:f8:f3:8f:7d:7a:24:c8:c1:90: + 35:fb:55:98:57:cb:9b:c0:36:31:20:21:70:10:13:3b:50:50: + 48:5e:dc:07:74:c2:ff:7e:ac:96:41:f5:28:7b:5b:3f:1c:81: + d8:05:49:b2:21:00:81:1b:93:ab:60:e1:e3:0d:ec:28:3d:2f: + 66:0f:e6:eb:82:d6:f3:d4:db:11:10:15:34:cd:c1:a0:9b:28: + 12:a5:f5:f9 + -----BEGIN CERTIFICATE----- + MIIDSTCCAjGgAwIBAgIBATANBgkqhkiG9w0BAQsFADAYMRYwFAYDVQQDDA0xMC4x + MzAuNzIuMTkwMB4XDTIyMDUxNzE5NDIyMVoXDTMyMDUxNDE5NDIyMVowETEPMA0G + A1UEAwwGY2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAurSO + wIoImrQSlMFMeN5fmiOTyGKS/PQm0oGsiCKuJPGZRtPoaXHzHPee96gA3+tkXmcA + ovO0wjPCg3WZ1O/bC7et+ULNdjeKX6gOyNVDW64w5Q8gWBnGW6WdpEoIfIZ3wk8s + XE61ujwvNfRtkODjKG1xzuiv84MgyzL5Z97RZj4GXKZlpw6Dx0lHZettytjP/Ey0 + uIA/YyFgsw6ItNduk5HQI3CWPv4s9k+buQ9AnyoVt0GHTAP/CVVwW8o98oH7/zd4 + LCBVejf08BsKtd6U/d7BTKA7FiQsDXeQZpa2O61zXwxBQGiXOburW2QTgGZyMQoF + YhO3ie3ZAY0tAXHDcwIDAQABo4GkMIGhMAkGA1UdEwQCMAAwHQYDVR0OBBYEFNuV + BxTGMEQhyEyMC3Wo6+gEtMUoMFMGA1UdIwRMMEqAFDLq9/p4R07O84D4EY5mXKdS + 
EKtuoRykGjAYMRYwFAYDVQQDDA0xMC4xMzAuNzIuMTkwghQ+uGvnRQNPjwTFcFgZ + Zh4Ti+wNzDATBgNVHSUEDDAKBggrBgEFBQcDAjALBgNVHQ8EBAMCB4AwDQYJKoZI + hvcNAQELBQADggEBAB9SJB3RnKbv8pl2t4jfEy94pA2Xic6k+LSS6HR/L+uchyi6 + 1UbVYvGJ+gbwL1jk8Kk4BcKSXEEtX/Lu4QDAMkgdD2dIMPCFl3Vzmd3O6eFdoNsI + PJhI/5R9zbMxQuD/YvTKhrLEzarmDCwbWxams/2CYxGS2FDgU77O42ujQMNJsHXO + JRrHAKaAI6OJRLt5QNGgRX5DpPHmWFfyjV0rqGv5ymOJLzfxLct39fjzj316JMjB + kDX7VZhXy5vANjEgIXAQEztQUEhe3Ad0wv9+rJZB9Sh7Wz8cgdgFSbIhAIEbk6tg + 4eMN7Cg9L2YP5uuC1vPU2xEQFTTNwaCbKBKl9fk= + -----END CERTIFICATE----- +client.key: |- + -----BEGIN PRIVATE KEY----- + Random-Test-Data + -----END PRIVATE KEY----- +egress-subnets: 10.130.72.190/32 +ingress-address: 10.130.72.190 +private-address: 10.130.72.190 diff --git a/tests/data/charm.yaml b/tests/data/charm.yaml new file mode 100644 index 0000000..d707096 --- /dev/null +++ b/tests/data/charm.yaml @@ -0,0 +1,14 @@ +description: Overlay for attaching current charm +applications: + gcp-integrator: + charm: gcp-integrator + channel: edge + num_units: 1 + trust: true + gcp-cloud-provider: + charm: {{charm}} +relations: +- ["gcp-cloud-provider:certificates", "easyrsa:client"] +- ["gcp-cloud-provider:kube-control", "kubernetes-control-plane"] +- ["gcp-cloud-provider:gcp-integration", "gcp-integrator:gcp"] +- ["gcp-cloud-provider:external-cloud-provider", "kubernetes-control-plane"] diff --git a/tests/data/kube_control_data.yaml b/tests/data/kube_control_data.yaml new file mode 100644 index 0000000..48ed9ec --- /dev/null +++ b/tests/data/kube_control_data.yaml @@ -0,0 +1,34 @@ +api-endpoints: '["https://10.246.154.7:6443"]' +cluster-tag: kubernetes-4ypskxahbu3rnfgsds3pksvwe3uh0lxt +cluster-cidr: 192.168.0.0/16 +cohort-keys: |- + { + "cdk-addons": "", + "kube-apiserver": "", + "kube-controller-manager": "", + "kube-proxy": "", + "kube-scheduler": "", + "kubectl": "", + "kubelet": "" + } +creds: |- + { + "gcp-cloud-provider/0": { + "client_token": "admin::reacted", + "kubelet_token": 
"gcp-cloud-provider/0::reacted", + "proxy_token": "kube-proxy::reacted", + "scope": "kubernetes-worker/0" + } + } +default-cni: '""' +domain: cluster.local +egress-subnets: 10.246.154.7/32 +enable-kube-dns: "True" +has-xcp: "false" +ingress-address: 10.246.154.7 +port: "53" +private-address: 10.246.154.7 +registry-location: rocks.canonical.com:443/cdk +sdn-ip: 10.152.183.20 +taints: '["node-role.kubernetes.io/control-plane=true:NoSchedule"]' +labels: '["node-role.kubernetes.io/control-plane=true"]' diff --git a/tests/data/mock_manifests/manifests/v0.2/component.yaml b/tests/data/mock_manifests/manifests/v0.2/component.yaml new file mode 100644 index 0000000..4b0855b --- /dev/null +++ b/tests/data/mock_manifests/manifests/v0.2/component.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: test-manifest-manager + namespace: kube-system diff --git a/tests/data/mock_manifests/manifests/v0.3.1/component.yaml b/tests/data/mock_manifests/manifests/v0.3.1/component.yaml new file mode 100644 index 0000000..eb2bc6d --- /dev/null +++ b/tests/data/mock_manifests/manifests/v0.3.1/component.yaml @@ -0,0 +1,39 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: test-manifest-manager + namespace: kube-system +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-manifest-secret + namespace: kube-system +stringData: + # NOTE: this is just an example configuration, update with real values based on your environment + test-manifest.conf: | + I'm secret information +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: test-manifest-deployment + namespace: kube-system +spec: + template: + metadata: + labels: + app: test-manifest-deployment + spec: + serviceAccountName: test-manifest-manager + nodeSelector: + node-role.kubernetes.io/control-plane: "" + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + dnsPolicy: "Default" + containers: + - name: test-container + image: 
gcr.io/google-samples/hello-app:v1.0 diff --git a/tests/data/mock_manifests/version b/tests/data/mock_manifests/version new file mode 100644 index 0000000..1e66a61 --- /dev/null +++ b/tests/data/mock_manifests/version @@ -0,0 +1 @@ +v0.3.1 \ No newline at end of file diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py new file mode 100644 index 0000000..8bad1a6 --- /dev/null +++ b/tests/integration/conftest.py @@ -0,0 +1,52 @@ +# Copyright 2022 Canonical Ltd. +# See LICENSE file for licensing details. +import logging +import random +import string +from pathlib import Path + +import pytest +from lightkube import AsyncClient, KubeConfig +from lightkube.models.meta_v1 import ObjectMeta +from lightkube.resources.core_v1 import Namespace + +log = logging.getLogger(__name__) + + +@pytest.fixture(scope="module") +def module_name(request): + return request.module.__name__.replace("_", "-") + + +@pytest.fixture() +async def kubeconfig(ops_test): + kubeconfig_path = ops_test.tmp_path / "kubeconfig" + retcode, stdout, stderr = await ops_test.run( + "juju", + "scp", + "kubernetes-control-plane/leader:/home/ubuntu/config", + kubeconfig_path, + ) + if retcode != 0: + log.error(f"retcode: {retcode}") + log.error(f"stdout:\n{stdout.strip()}") + log.error(f"stderr:\n{stderr.strip()}") + pytest.fail("Failed to copy kubeconfig from kubernetes-control-plane") + assert Path(kubeconfig_path).stat().st_size, "kubeconfig file is 0 bytes" + yield kubeconfig_path + + +@pytest.fixture() +async def kubernetes(kubeconfig, module_name): + rand_str = "".join(random.choices(string.ascii_lowercase + string.digits, k=5)) + namespace = f"{module_name}-{rand_str}" + config = KubeConfig.from_file(kubeconfig) + client = AsyncClient( + config=config.get(context_name="juju-context"), + namespace=namespace, + trust_env=False, + ) + namespace_obj = Namespace(metadata=ObjectMeta(name=namespace)) + await client.create(namespace_obj) + yield client + await client.delete(Namespace, 
namespace) diff --git a/tests/integration/test_gcp_cloud_provider.py b/tests/integration/test_gcp_cloud_provider.py new file mode 100644 index 0000000..9e2040c --- /dev/null +++ b/tests/integration/test_gcp_cloud_provider.py @@ -0,0 +1,45 @@ +# Copyright 2022 Canonical Ltd. +# See LICENSE file for licensing details. +import logging +import shlex +from pathlib import Path + +import pytest +from lightkube.resources.core_v1 import Node + +log = logging.getLogger(__name__) + + +@pytest.mark.abort_on_fail +async def test_build_and_deploy(ops_test): + charm = next(Path(".").glob("gcp-cloud-provider*.charm"), None) + if not charm: + log.info("Build Charm...") + charm = await ops_test.build_charm(".") + + overlays = [ + ops_test.Bundle("kubernetes-core", channel="edge"), + Path("tests/data/charm.yaml"), + ] + + bundle, *overlays = await ops_test.async_render_bundles(*overlays, charm=charm.resolve()) + + log.info("Deploy Charm...") + model = ops_test.model_full_name + cmd = f"juju deploy -m {model} {bundle} " + " ".join( + f"--overlay={f} --trust" for f in overlays + ) + rc, stdout, stderr = await ops_test.run(*shlex.split(cmd)) + assert rc == 0, f"Bundle deploy failed: {(stderr or stdout).strip()}" + + log.info(stdout) + await ops_test.model.block_until( + lambda: "gcp-cloud-provider" in ops_test.model.applications, timeout=60 + ) + + await ops_test.model.wait_for_idle(wait_for_active=True, timeout=60 * 60) + + +async def test_provider_ids(kubernetes): + async for node in kubernetes.list(Node): + assert node.spec.providerID.startswith("gce://") diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py new file mode 100644 index 0000000..c813a32 --- /dev/null +++ b/tests/unit/conftest.py @@ -0,0 +1,23 @@ +# Copyright 2022 Canonical Ltd. +# See LICENSE file for licensing details. 
+import unittest.mock as mock + +import pytest +from lightkube import ApiError + + +@pytest.fixture() +def api_error_klass(): + class TestApiError(ApiError): + status = mock.MagicMock() + + def __init__(self): + pass + + yield TestApiError + + +@pytest.fixture(autouse=True) +def lk_client(): + with mock.patch("ops.manifests.manifest.Client", autospec=True) as mock_lightkube: + yield mock_lightkube.return_value diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py new file mode 100644 index 0000000..954d17e --- /dev/null +++ b/tests/unit/test_charm.py @@ -0,0 +1,179 @@ +# Copyright 2022 Canonical Ltd. +# See LICENSE file for licensing details. +# +# Learn more about testing at: https://juju.is/docs/sdk/testing + +import unittest.mock as mock +from ipaddress import ip_network +from pathlib import Path + +import ops.testing +import pytest +import yaml +from ops.model import BlockedStatus, WaitingStatus +from ops.testing import Harness + +from charm import GcpCloudProviderCharm + +ops.testing.SIMULATE_CAN_CONNECT = True + + +@pytest.fixture +def harness(): + harness = Harness(GcpCloudProviderCharm) + try: + yield harness + finally: + harness.cleanup() + + +@pytest.fixture(autouse=True) +def mock_ca_cert(tmpdir): + ca_cert = Path(tmpdir) / "ca.crt" + with mock.patch.object(GcpCloudProviderCharm, "CA_CERT_PATH", ca_cert): + yield ca_cert + + +@pytest.fixture() +def certificates(): + with mock.patch("charm.CertificatesRequires") as mocked: + certificates = mocked.return_value + certificates.ca = "abcd" + certificates.evaluate_relation.return_value = None + yield certificates + + +@pytest.fixture() +def kube_control(): + with mock.patch("charm.KubeControlRequirer") as mocked: + kube_control = mocked.return_value + kube_control.evaluate_relation.return_value = None + kube_control.get_registry_location.return_value = "rocks.canonical.com/cdk" + kube_control.get_controller_taints.return_value = [] + kube_control.get_controller_labels.return_value = [] + 
kube_control.get_cluster_tag.return_value = "kubernetes-4ypskxahbu3rnfgsds3pksvwe3uh0lxt" + kube_control.get_cluster_cidr.return_value = ip_network("192.168.0.0/16") + kube_control.relation.app.name = "kubernetes-control-plane" + kube_control.relation.units = [f"kubernetes-control-plane/{_}" for _ in range(2)] + yield kube_control + + +@pytest.fixture() +def gcp_integration(): + with mock.patch("charm.GCPIntegratorRequires") as mocked: + integration = mocked.return_value + integration.credentials = b"{}" + integration.evaluate_relation.return_value = None + yield integration + + +def test_waits_for_certificates(harness): + harness.begin_with_initial_hooks() + charm = harness.charm + assert isinstance(charm.unit.status, BlockedStatus) + assert charm.unit.status.message == "Missing required certificates" + + # Test adding the certificates relation + rel_cls = type(charm.certificates) + rel_cls.relation = property(rel_cls.relation.func) + rel_cls._data = property(rel_cls._data.func) + rel_cls._raw_data = property(rel_cls._raw_data.func) + rel_id = harness.add_relation("certificates", "easyrsa") + assert isinstance(charm.unit.status, WaitingStatus) + assert charm.unit.status.message == "Waiting for certificates" + harness.add_relation_unit(rel_id, "easyrsa/0") + assert isinstance(charm.unit.status, WaitingStatus) + assert charm.unit.status.message == "Waiting for certificates" + harness.update_relation_data( + rel_id, + "easyrsa/0", + yaml.safe_load(Path("tests/data/certificates_data.yaml").read_text()), + ) + assert isinstance(charm.unit.status, BlockedStatus) + assert charm.unit.status.message == "Missing required kube-control relation" + + +@mock.patch("ops.interface_kube_control.KubeControlRequirer.create_kubeconfig") +@pytest.mark.usefixtures("certificates") +def test_waits_for_kube_control(mock_create_kubeconfig, harness): + harness.begin_with_initial_hooks() + charm = harness.charm + assert isinstance(charm.unit.status, BlockedStatus) + assert 
charm.unit.status.message == "Missing required kube-control relation" + + # Add the kube-control relation + rel_cls = type(charm.kube_control) + rel_cls.relation = property(rel_cls.relation.func) + rel_cls._data = property(rel_cls._data.func) + rel_id = harness.add_relation("kube-control", "kubernetes-control-plane") + assert isinstance(charm.unit.status, WaitingStatus) + assert charm.unit.status.message == "Waiting for kube-control relation" + + harness.add_relation_unit(rel_id, "kubernetes-control-plane/0") + assert isinstance(charm.unit.status, WaitingStatus) + assert charm.unit.status.message == "Waiting for kube-control relation" + mock_create_kubeconfig.assert_not_called() + + harness.update_relation_data( + rel_id, + "kubernetes-control-plane/0", + yaml.safe_load(Path("tests/data/kube_control_data.yaml").read_text()), + ) + mock_create_kubeconfig.assert_has_calls( + [ + mock.call(charm.CA_CERT_PATH, "/root/.kube/config", "root", charm.unit.name), + mock.call(charm.CA_CERT_PATH, "/home/ubuntu/.kube/config", "ubuntu", charm.unit.name), + ] + ) + assert isinstance(charm.unit.status, BlockedStatus) + assert charm.unit.status.message == "Provider manifests waiting for definition of gcp-creds" + + +@pytest.mark.usefixtures("certificates", "kube_control", "gcp_integration") +def test_waits_for_config(harness: Harness, lk_client, caplog): + harness.begin_with_initial_hooks() + with mock.patch.object(lk_client, "list") as mock_list: + mock_list.return_value = [mock.Mock(**{"metadata.annotations": {}})] + caplog.clear() + harness.update_config( + { + "control-node-selector": "something.io/my-control-node=", + } + ) + + provider_messages = {r.message for r in caplog.records if "provider" in r.filename} + + assert provider_messages == { + "Adding provider tolerations from control-plane", + "Adjusting container arguments", + "Adjusting container cloud-config secret", + 'Applying provider Control Node Selector as something.io/my-control-node: ""', + "Encoding secret data 
for cloud-controller.", + "Encode cloud-config for cloud-controller.", + "Skip Loadbalancer RBAC Rule adjustments.", + } + + caplog.clear() + harness.update_config({"control-node-selector": ""}) + provider_messages = {r.message for r in caplog.records if "provider" in r.filename} + + assert provider_messages == { + "Adding provider tolerations from control-plane", + "Adjusting container arguments", + "Adjusting container cloud-config secret", + 'Applying provider Control Node Selector as juju-application: "kubernetes-control-plane"', + "Encoding secret data for cloud-controller.", + "Encode cloud-config for cloud-controller.", + "Skip Loadbalancer RBAC Rule adjustments.", + } + + +def test_install_or_upgrade_apierror(harness: Harness, lk_client, api_error_klass): + lk_client.apply.side_effect = [mock.MagicMock(), api_error_klass] + harness.begin_with_initial_hooks() + charm = harness.charm + charm.stored.config_hash = "mock_hash" + mock_event = mock.MagicMock() + charm._install_or_upgrade(mock_event) + mock_event.defer.assert_called_once() + assert isinstance(charm.unit.status, WaitingStatus) diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..0d4a3c1 --- /dev/null +++ b/tox.ini @@ -0,0 +1,102 @@ +# Copyright 2021 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +[tox] +skipsdist=True +skip_missing_interpreters = True +envlist = lint, unit + +[vars] +cov_path = {toxinidir}/htmlcov +src_path = {toxinidir}/src/ +tst_path = {toxinidir}/tests/ +upstream_path = {toxinidir}/upstream/ +tst_data_path = {toxinidir}/tests/data/ +all_path = {[vars]src_path} {[vars]tst_path} {[vars]upstream_path} + +[testenv] +setenv = + PYTHONPATH = {toxinidir}:{[vars]src_path} + PYTHONBREAKPOINT=ipdb.set_trace + PY_COLORS=1 +passenv = + PYTHONPATH + CHARM_BUILD_DIR + MODEL_SETTINGS + +[testenv:format] +description = Apply coding style standards to code +deps = + black + isort + ruff +commands = + isort {[vars]all_path} + black {[vars]all_path} + ruff --fix {[vars]all_path} + +[testenv:lint] +description = Check code against coding style standards +deps = + -r requirements.txt + black + ruff + pep8-naming + isort + codespell + mypy + types-PyYAML + types-backports + types-dataclasses +commands = + codespell {toxinidir} --skip {toxinidir}/.git --skip {toxinidir}/.tox \ + --skip {toxinidir}/build --skip {toxinidir}/lib --skip {toxinidir}/venv \ + --skip {toxinidir}/.mypy_cache --skip {toxinidir}/icon.svg \ + --skip {[vars]cov_path} \ + --skip "*.yaml" + ruff {[vars]all_path} + isort --check-only --diff {[vars]all_path} + black --check --diff {[vars]all_path} + mypy --config-file={toxinidir}/tox.ini {[vars]src_path} + +[testenv:unit] +description = Run unit tests +deps = + pytest + pytest-cov + ipdb + -r{toxinidir}/requirements.txt +commands = + pytest --cov={[vars]src_path} \ + --cov-report=term-missing --cov-report=html \ + --ignore={[vars]tst_path}integration \ + -vv --tb native -s \ + {posargs:tests/unit} + +[testenv:integration] +description = Run integration tests +deps = + pytest + pytest-operator + -r{toxinidir}/requirements.txt +commands = + pytest -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO -s {posargs} + +[testenv:update] +deps = + pyyaml + semver +commands = + python {toxinidir}/upstream/update.py {posargs} + +# mypy 
config +[mypy] + +[mypy-ops.*] +ignore_missing_imports = True + +[mypy-lightkube.*] +ignore_missing_imports = True + +[isort] +profile = black diff --git a/upstream/cloud_provider/manifests/v0.27.1/release.yaml b/upstream/cloud_provider/manifests/v0.27.1/release.yaml new file mode 100644 index 0000000..31ec41a --- /dev/null +++ b/upstream/cloud_provider/manifests/v0.27.1/release.yaml @@ -0,0 +1,356 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: cloud-controller-manager + namespace: kube-system + labels: + component: cloud-controller-manager + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io +spec: + selector: + matchLabels: + component: cloud-controller-manager + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + tier: control-plane + component: cloud-controller-manager + spec: + nodeSelector: null + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + effect: NoSchedule + - key: node.kubernetes.io/not-ready + effect: NoSchedule + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule + serviceAccountName: cloud-controller-manager + containers: + - name: cloud-controller-manager + image: k8scloudprovidergcp/cloud-controller-manager:latest + imagePullPolicy: IfNotPresent + # ko puts it somewhere else... 
command: ['/usr/local/bin/cloud-controller-manager'] + args: [] # args must be replaced by tooling + env: + - name: KUBERNETES_SERVICE_HOST + value: "127.0.0.1" + livenessProbe: + failureThreshold: 3 + httpGet: + host: 127.0.0.1 + path: /healthz + port: 10258 + scheme: HTTPS + initialDelaySeconds: 15 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 15 + resources: + requests: + cpu: "200m" + volumeMounts: + - mountPath: /etc/kubernetes/cloud.config + name: cloudconfig + readOnly: true + hostNetwork: true + priorityClassName: system-cluster-critical + volumes: + - hostPath: + path: /etc/kubernetes/cloud.config + type: "" + name: cloudconfig +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: cloud-controller-manager + namespace: kube-system + labels: + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cloud-controller-manager:apiserver-authentication-reader + namespace: kube-system + labels: + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- apiGroup: "" + kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system +--- + +# https://github.com/kubernetes/cloud-provider-gcp/blob/master/deploy/cloud-node-controller-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:cloud-controller-manager + labels: + addonmanager.kubernetes.io/mode: Reconcile + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io +rules: +- apiGroups: + - "" + - events.k8s.io + resources: + - events + verbs: + - create + - patch + - update +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - list + - watch + - update +- apiGroups: + - coordination.k8s.io + resourceNames: + - cloud-controller-manager + resources: + - leases + verbs: + - get + - 
update +- apiGroups: + - "" + resources: + - endpoints + - serviceaccounts + verbs: + - create + - get + - update +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - update + - patch # until #393 lands +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get +- apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - update +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - update +- apiGroups: + - "authentication.k8s.io" + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - "*" + resources: + - "*" + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - serviceaccounts/token + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: system::leader-locking-cloud-controller-manager + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - watch +- apiGroups: + - "" + resources: + - configmaps + resourceNames: + - cloud-controller-manager + verbs: + - get + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:controller:cloud-node-controller + labels: + addonmanager.kubernetes.io/mode: Reconcile + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - update + - delete + - patch +- apiGroups: + - "" + resources: + - nodes/status + verbs: + - get + - list + - update + - delete + - patch + +- apiGroups: + - "" + resources: + - pods + verbs: + - list + - delete +- apiGroups: + - "" + resources: + - pods/status + verbs: + - list + - delete +--- + +# https://github.com/kubernetes/cloud-provider-gcp/blob/master/deploy/cloud-node-controller-binding.yaml +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: system::leader-locking-cloud-controller-manager + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: system::leader-locking-cloud-controller-manager +subjects: +- kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:cloud-controller-manager + labels: + addonmanager.kubernetes.io/mode: Reconcile + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager +subjects: +- kind: ServiceAccount + apiGroup: "" + name: cloud-controller-manager + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: system:controller:cloud-node-controller + labels: + addonmanager.kubernetes.io/mode: Reconcile + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:controller:cloud-node-controller +subjects: +- kind: ServiceAccount + name: cloud-node-controller + namespace: kube-system +--- + +# https://github.com/kubernetes/cloud-provider-gcp/blob/master/deploy/pvl-controller-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: system:controller:pvl-controller + labels: + addonmanager.kubernetes.io/mode: Reconcile + addon.kops.k8s.io/name: gcp-cloud-controller.addons.k8s.io +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - persistentvolumes + verbs: + - list + - watch diff --git a/upstream/cloud_provider/version b/upstream/cloud_provider/version new 
file mode 100644 index 0000000..04e9457 --- /dev/null +++ b/upstream/cloud_provider/version @@ -0,0 +1 @@ +v0.27.1