Don't label workers by default with node-role due to lack of permissions
addyess committed Feb 21, 2024
1 parent 661b03a commit 6914a3d
Showing 3 changed files with 36 additions and 20 deletions.
7 changes: 5 additions & 2 deletions charms/worker/charmcraft.yaml
@@ -47,11 +47,14 @@ config:
type: string
description: Snap channel of the k8s snap
labels:
default: "node-role.kubernetes.io/worker="
default: ""
type: string
description: |
Labels can be used to organize and to select subsets of nodes in the
cluster. Declare node labels in key=value format, separated by spaces.
Note: Due to NodeRestriction, workers are limited in how they can label themselves.
https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#noderestriction
parts:
charm:
build-packages: [git]
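Since the worker's labels default is now empty and the NodeRestriction admission controller stops a kubelet from adding node-role.kubernetes.io/* labels to its own Node object, a worker role label would have to be applied by a client with broader permissions (for example, from a control-plane unit). Below is a minimal sketch, not part of this commit, of doing that with kubectl from Python; the function name, node name, and kubeconfig path are placeholders.

import subprocess


def label_worker_node(node_name: str, kubeconfig: str = "/path/to/admin/kubeconfig") -> None:
    """Apply a node-role label to a worker using privileged credentials.

    NodeRestriction only blocks a kubelet from setting node-role.kubernetes.io/*
    labels on its own Node object; an admin client is not restricted.
    """
    subprocess.run(
        [
            "kubectl",
            "--kubeconfig", kubeconfig,
            "label", "node", node_name,
            "node-role.kubernetes.io/worker=",
            "--overwrite",
        ],
        check=True,
    )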
10 changes: 4 additions & 6 deletions charms/worker/k8s/src/charm.py
@@ -68,9 +68,6 @@ def __init__(self, *args):
"""
super().__init__(*args)
factory = UnixSocketConnectionFactory(unix_socket=K8SD_SNAP_SOCKET, timeout=320)
self.label_maker = LabelMaker(
self, kubeconfig_path=self._source_kubeconfig, kubectl=KUBECTL_PATH
)
self.api_manager = K8sdAPIManager(factory)
self.cos = COSIntegration(self)
self.reconciler = Reconciler(self, self._reconcile)
@@ -355,13 +352,14 @@ def _generate_kubeconfig(self):
KUBECONFIG.parent.mkdir(parents=True, exist_ok=True)
KUBECONFIG.write_bytes(self._source_kubeconfig.read_bytes())

@status.on_error(ops.WaitingStatus("Waiting to apply node labels"), LabelMaker.NodeLabelError)
@status.on_error(ops.BlockedStatus("Cannot apply node-labels"), LabelMaker.NodeLabelError)
def _apply_node_labels(self):
"""Apply labels to the node."""
status.add(ops.MaintenanceStatus("Apply Node Labels"))
node = self.get_node_name()
if self.label_maker.active_labels() is not None:
self.label_maker.apply_node_labels()
labeler = LabelMaker(self, kubeconfig_path=self._source_kubeconfig, kubectl=KUBECTL_PATH)
if labeler.active_labels() is not None:
labeler.apply_node_labels()
log.info("Node %s labelled successfully", node)
else:
log.info("Node %s not yet labelled", node)
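Two changes land in charm.py: the LabelMaker is now created on demand inside _apply_node_labels rather than eagerly in __init__, and a labelling failure now surfaces as BlockedStatus instead of WaitingStatus, since waiting will not help when the node lacks permission to label itself. A rough sketch of the decorator pattern follows, under the assumption that status.on_error behaves roughly like the code below; the charm's real status helper may differ.

import functools

import ops


def on_error(fallback: ops.StatusBase, *exceptions: type):
    """Build a decorator that converts the given exceptions into a unit status."""

    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            except exceptions:  # e.g. LabelMaker.NodeLabelError
                self.unit.status = fallback  # e.g. BlockedStatus("Cannot apply node-labels")

        return wrapper

    return decorator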
39 changes: 27 additions & 12 deletions tests/integration/test_k8s.py
@@ -14,6 +14,16 @@
log = logging.getLogger(__name__)


async def get_nodes(k8s):
"""Return the parsed list of node objects from `k8s kubectl get nodes -o json`."""
action = await k8s.run("k8s kubectl get nodes -o json")
result = await action.wait()
assert result.results["return-code"] == 0, "Failed to get nodes with kubectl"
log.info("Parsing node list...")
node_list = json.loads(result.results["stdout"])
assert node_list["kind"] == "List", "Should have found a list of nodes"
return node_list["items"]


@retry(reraise=True, stop=stop_after_attempt(12), wait=wait_fixed(15))
async def ready_nodes(k8s, expected_count):
"""Get a list of the ready nodes.
@@ -26,26 +36,20 @@ async def ready_nodes(k8s, expected_count):
list of nodes
"""
log.info("Finding all nodes...")
action = await k8s.run("k8s kubectl get nodes -o json")
result = await action.wait()
assert result.results["return-code"] == 0, "Failed to get nodes with kubectl"
log.info("Parsing node list...")
node_list = json.loads(result.results["stdout"])
assert node_list["kind"] == "List", "Should have found a list of nodes"
nodes = {
nodes = await get_nodes(k8s)
ready_nodes = {
node["metadata"]["name"]: all(
condition["status"] == "False"
for condition in node["status"]["conditions"]
if condition["type"] != "Ready"
)
for node in node_list["items"]
for node in nodes
}
log.info("Found %d/%d nodes...", len(nodes), expected_count)
assert len(nodes) == expected_count, f"Expect {expected_count} nodes in the list"
for node, ready in nodes.items():
log.info("Found %d/%d nodes...", len(ready_nodes), expected_count)
assert len(ready_nodes) == expected_count, f"Expect {expected_count} nodes in the list"
for node, ready in ready_nodes.items():
log.info("Node %s is %s..", node, "ready" if ready else "not ready")
assert ready, f"Node not yet ready: {node}."
return nodes


@pytest.mark.abort_on_fail
@@ -55,3 +59,14 @@ async def test_nodes_ready(kubernetes_cluster):
worker = kubernetes_cluster.applications["k8s-worker"]
expected_nodes = len(k8s.units) + len(worker.units)
await ready_nodes(k8s.units[0], expected_nodes)


async def test_nodes_labelled(kubernetes_cluster):
"""Check that control-plane and worker nodes carry the expected labels."""
k8s = kubernetes_cluster.applications["k8s"]
worker = kubernetes_cluster.applications["k8s-worker"]
nodes = await get_nodes(k8s.units[0])
control_plane_label = "node-role.kubernetes.io/control-plane"
control_plane = [n for n in nodes if control_plane_label in n["metadata"]["labels"]]
assert len(k8s.units) == len(control_plane), "Not all control-plane nodes labeled"
juju_nodes = [n for n in nodes if "juju-charm" in n["metadata"]["labels"]]
assert len(k8s.units + worker.units) == len(juju_nodes), "Not all nodes labeled as juju-charms"
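The same assertions as test_nodes_labelled can be run by hand against a deployed cluster. A small sketch, assuming the k8s snap's `k8s kubectl` wrapper is on PATH and that the expected counts are supplied by the operator:

import json
import subprocess


def check_node_labels(expected_control_planes: int, expected_total: int) -> None:
    """Re-run the label assertions from test_nodes_labelled outside the test harness."""
    out = subprocess.run(
        ["k8s", "kubectl", "get", "nodes", "-o", "json"],
        check=True, capture_output=True, text=True,
    ).stdout
    nodes = json.loads(out)["items"]
    control_plane = [
        n for n in nodes
        if "node-role.kubernetes.io/control-plane" in n["metadata"]["labels"]
    ]
    juju_labelled = [n for n in nodes if "juju-charm" in n["metadata"]["labels"]]
    assert len(control_plane) == expected_control_planes, "Not all control-plane nodes labelled"
    assert len(juju_labelled) == expected_total, "Not all nodes labelled by the charms"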
