diff --git a/charms/worker/charmcraft.yaml b/charms/worker/charmcraft.yaml
index 1f845a07..7fc53d9c 100644
--- a/charms/worker/charmcraft.yaml
+++ b/charms/worker/charmcraft.yaml
@@ -47,11 +47,14 @@ config:
       type: string
       description: Snap channel of the k8s snap
     labels:
-      default: "node-role.kubernetes.io/worker="
+      default: ""
       type: string
       description: |
         Labels can be used to organize and to select subsets of nodes in the
-        cluster. Declare node labels in key=value format, separated by spaces.
+        cluster. Declare node labels in key=value format, separated by spaces.
+
+        Note: Due to NodeRestriction, workers are limited in how they can label themselves; see
+        https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#noderestriction
 parts:
   charm:
     build-packages: [git]
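The labels option above takes key=value pairs separated by spaces, and an empty value after the "=" is legal, as the old default node-role.kubernetes.io/worker= shows. A minimal sketch of parsing that format, assuming a hypothetical parse_labels helper (the charm's real parsing lives in LabelMaker and may differ):

    def parse_labels(config_value: str) -> dict:
        """Parse the space-separated key=value pairs the labels option accepts."""
        labels = {}
        for pair in config_value.split():
            key, sep, value = pair.partition("=")
            if not key or not sep:
                raise ValueError(f"Malformed label, expected key=value: {pair!r}")
            labels[key] = value
        return labels

    # Empty values are allowed, e.g. the old default "node-role.kubernetes.io/worker=".
    assert parse_labels("environment=production node-role.kubernetes.io/worker=") == {
        "environment": "production",
        "node-role.kubernetes.io/worker": "",
    }
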
diff --git a/charms/worker/k8s/src/charm.py b/charms/worker/k8s/src/charm.py
index 90a9d99e..60f8c170 100755
--- a/charms/worker/k8s/src/charm.py
+++ b/charms/worker/k8s/src/charm.py
@@ -68,9 +68,6 @@ def __init__(self, *args):
         """
         super().__init__(*args)
         factory = UnixSocketConnectionFactory(unix_socket=K8SD_SNAP_SOCKET, timeout=320)
-        self.label_maker = LabelMaker(
-            self, kubeconfig_path=self._source_kubeconfig, kubectl=KUBECTL_PATH
-        )
         self.api_manager = K8sdAPIManager(factory)
         self.cos = COSIntegration(self)
         self.reconciler = Reconciler(self, self._reconcile)
@@ -355,13 +352,14 @@ def _generate_kubeconfig(self):
         KUBECONFIG.parent.mkdir(parents=True, exist_ok=True)
         KUBECONFIG.write_bytes(self._source_kubeconfig.read_bytes())
 
-    @status.on_error(ops.WaitingStatus("Waiting to apply node labels"), LabelMaker.NodeLabelError)
+    @status.on_error(ops.BlockedStatus("Cannot apply node-labels"), LabelMaker.NodeLabelError)
     def _apply_node_labels(self):
         """Apply labels to the node."""
         status.add(ops.MaintenanceStatus("Apply Node Labels"))
         node = self.get_node_name()
-        if self.label_maker.active_labels() is not None:
-            self.label_maker.apply_node_labels()
+        labeler = LabelMaker(self, kubeconfig_path=self._source_kubeconfig, kubectl=KUBECTL_PATH)
+        if labeler.active_labels() is not None:
+            labeler.apply_node_labels()
             log.info("Node %s labelled successfully", node)
         else:
             log.info("Node %s not yet labelled", node)
diff --git a/tests/integration/test_k8s.py b/tests/integration/test_k8s.py
index 2b9480d5..492e136d 100644
--- a/tests/integration/test_k8s.py
+++ b/tests/integration/test_k8s.py
@@ -14,6 +14,16 @@
 log = logging.getLogger(__name__)
 
 
+async def get_nodes(k8s):
+    action = await k8s.run("k8s kubectl get nodes -o json")
+    result = await action.wait()
+    assert result.results["return-code"] == 0, "Failed to get nodes with kubectl"
+    log.info("Parsing node list...")
+    node_list = json.loads(result.results["stdout"])
+    assert node_list["kind"] == "List", "Should have found a list of nodes"
+    return node_list["items"]
+
+
 @retry(reraise=True, stop=stop_after_attempt(12), wait=wait_fixed(15))
 async def ready_nodes(k8s, expected_count):
     """Get a list of the ready nodes.
@@ -26,26 +36,20 @@ async def ready_nodes(k8s, expected_count):
         list of nodes
     """
     log.info("Finding all nodes...")
-    action = await k8s.run("k8s kubectl get nodes -o json")
-    result = await action.wait()
-    assert result.results["return-code"] == 0, "Failed to get nodes with kubectl"
-    log.info("Parsing node list...")
-    node_list = json.loads(result.results["stdout"])
-    assert node_list["kind"] == "List", "Should have found a list of nodes"
-    nodes = {
+    nodes = await get_nodes(k8s)
+    ready_nodes = {
         node["metadata"]["name"]: all(
             condition["status"] == "False"
             for condition in node["status"]["conditions"]
             if condition["type"] != "Ready"
         )
-        for node in node_list["items"]
+        for node in nodes
     }
-    log.info("Found %d/%d nodes...", len(nodes), expected_count)
-    assert len(nodes) == expected_count, f"Expect {expected_count} nodes in the list"
-    for node, ready in nodes.items():
+    log.info("Found %d/%d nodes...", len(ready_nodes), expected_count)
+    assert len(ready_nodes) == expected_count, f"Expect {expected_count} nodes in the list"
+    for node, ready in ready_nodes.items():
         log.info("Node %s is %s..", node, "ready" if ready else "not ready")
         assert ready, f"Node not yet ready: {node}."
-    return nodes
 
 
 @pytest.mark.abort_on_fail
@@ -55,3 +59,14 @@ async def test_nodes_ready(kubernetes_cluster):
     worker = kubernetes_cluster.applications["k8s-worker"]
     expected_nodes = len(k8s.units) + len(worker.units)
     await ready_nodes(k8s.units[0], expected_nodes)
+
+
+async def test_nodes_labelled(kubernetes_cluster):
+    k8s = kubernetes_cluster.applications["k8s"]
+    worker = kubernetes_cluster.applications["k8s-worker"]
+    nodes = await get_nodes(k8s.units[0])
+    control_plane_label = "node-role.kubernetes.io/control-plane"
+    control_plane = [n for n in nodes if control_plane_label in n["metadata"]["labels"]]
+    assert len(k8s.units) == len(control_plane), "Not all control-plane nodes labeled"
+    juju_nodes = [n for n in nodes if "juju-charm" in n["metadata"]["labels"]]
+    assert len(k8s.units + worker.units) == len(juju_nodes), "Not all nodes labeled as juju-charms"
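A note on NodeRestriction, which motivates both the new empty config default and the label assertions above: the admission controller only lets a kubelet modify its own Node object, rejects self-applied labels under the node-restriction.kubernetes.io/ prefix, and restricts kubernetes.io/- and k8s.io/-prefixed labels to a small allowlist, which is why a worker cannot give itself node-role.kubernetes.io/worker. A rough sketch of the rule with an abridged allowlist (the admission-controllers page linked in charmcraft.yaml is authoritative):

    # Abridged approximation of the NodeRestriction label rules; the real
    # allowlist is longer (see the linked admission-controllers documentation).
    ALLOWED_EXACT = {"kubernetes.io/hostname", "kubernetes.io/arch", "kubernetes.io/os"}
    ALLOWED_PREFIXES = ("node.kubernetes.io/", "kubelet.kubernetes.io/")

    def kubelet_may_self_label(key: str) -> bool:
        """Return True if NodeRestriction would let a kubelet set this label on itself."""
        if key.startswith("node-restriction.kubernetes.io/"):
            return False  # reserved for workload isolation, never self-applied
        if "kubernetes.io/" in key or "k8s.io/" in key:
            return key in ALLOWED_EXACT or key.startswith(ALLOWED_PREFIXES)
        return True  # unreserved prefixes are left to the operator

    assert not kubelet_may_self_label("node-role.kubernetes.io/worker")
    assert kubelet_may_self_label("environment")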