From 147cc9495f90a0ecf5a06bdd293f839eb8ebb8d0 Mon Sep 17 00:00:00 2001 From: Adam Dyess Date: Fri, 13 Dec 2024 21:34:15 -0600 Subject: [PATCH 01/11] Adjustments to test charm upgrades --- charms/worker/k8s/src/literals.py | 6 +- tests/integration/conftest.py | 334 ++-------------- tests/integration/data/test-bundle-ceph.yaml | 1 - tests/integration/data/test-bundle-etcd.yaml | 2 - tests/integration/data/test-bundle.yaml | 2 - tests/integration/helpers.py | 394 ++++++++++++++++++- tests/integration/test_ceph.py | 2 +- tests/integration/test_etcd.py | 4 +- tests/integration/test_k8s.py | 22 +- tests/integration/test_upgrade.py | 74 ++++ 10 files changed, 502 insertions(+), 339 deletions(-) create mode 100644 tests/integration/test_upgrade.py diff --git a/charms/worker/k8s/src/literals.py b/charms/worker/k8s/src/literals.py index 50b2180f..df1a1ef7 100644 --- a/charms/worker/k8s/src/literals.py +++ b/charms/worker/k8s/src/literals.py @@ -61,9 +61,9 @@ }, # NOTE: Update the dependencies for the k8s-service before releasing. "k8s_service": { - "dependencies": {"k8s-worker": "^1.30, < 1.32"}, + "dependencies": {"k8s-worker": "^1.31, < 1.33"}, "name": "k8s", - "upgrade_supported": "^1.30, < 1.32", - "version": "1.31.3", + "upgrade_supported": "^1.31, < 1.33", + "version": "1.32.0", }, } diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 9b84c194..9e269810 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -2,22 +2,17 @@ # See LICENSE file for licensing details. 
"""Fixtures for charm tests.""" -import asyncio import contextlib import json import logging -import re import shlex -from dataclasses import dataclass, field -from itertools import chain from pathlib import Path -from typing import Dict, List, Mapping, Optional, Tuple +from typing import Optional import juju.utils import pytest import pytest_asyncio import yaml -from juju.application import Application from juju.model import Model from juju.tag import untag from kubernetes import config as k8s_config @@ -25,19 +20,26 @@ from pytest_operator.plugin import OpsTest from .cos_substrate import LXDSubstrate -from .helpers import get_unit_cidrs, is_deployed +from .helpers import Bundle, Charm, cloud_type, get_unit_cidrs, is_deployed log = logging.getLogger(__name__) TEST_DATA = Path(__file__).parent / "data" DEFAULT_SNAP_INSTALLATION = TEST_DATA / "default-snap-installation.tar.gz" -DEFAULT_RESOURCES = {"snap-installation": ""} def pytest_addoption(parser: pytest.Parser): """Parse additional pytest options. - --charm-file can be used multiple times, specifies which local charm files are available - --upgrade-from instruct tests to start with a specific channel, and upgrade to these charms + --charm-file + can be used multiple times, specifies which local charm files are available + --upgrade-from + instruct tests to start with a specific channel, and upgrade to these charms + --snap-installation-resource + path to the snap installation resource + --lxd-containers + if cloud is LXD, use containers + --apply-proxy + apply proxy to model-config Args: parser: Pytest parser. @@ -68,7 +70,10 @@ def pytest_configure(config): config: Pytest config. 
""" config.addinivalue_line("markers", "cos: mark COS integration tests.") - config.addinivalue_line("markers", "bundle_file(name): specify a YAML bundle file for a test.") + config.addinivalue_line( + "markers", + "bundle(file='', apps_local={}, apps_channel={}, apps_resources={}): specify a YAML bundle file for a test.", + ) def pytest_collection_modifyitems(config, items): @@ -87,234 +92,6 @@ def pytest_collection_modifyitems(config, items): item.add_marker(skip_cos) -@dataclass -class Charm: - """Represents source charms. - - Attrs: - ops_test: Instance of the pytest-operator plugin - arch: Cloud Architecture - path: Path to the charm file - metadata: Charm's metadata - app_name: Preferred name of the juju application - """ - - ops_test: OpsTest - arch: str - path: Path - _charmfile: Optional[Path] = None - _URL_RE = re.compile(r"ch:(?P\w+)/(?P\w+)/(?P.+)") - - @staticmethod - def craft_url(charm: str, series: str, arch: str) -> str: - """Craft a charm URL. - - Args: - charm: Charm name - series: Cloud series - arch: Cloud architecture - - Returns: - string: URL to the charm - """ - if m := Charm._URL_RE.match(charm): - charm = m.group("charm") - return f"ch:{arch}/{series}/{charm}" - - @property - def metadata(self) -> dict: - """Charm Metadata.""" - return yaml.safe_load((self.path / "charmcraft.yaml").read_text()) - - @property - def app_name(self) -> str: - """Suggested charm name.""" - return self.metadata["name"] - - async def resolve(self, charm_files: List[str]) -> Path: - """Build or find the charm with ops_test. 
- - Args: - charm_files: The list charms files to resolve - - Return: - path to charm file - - Raises: - FileNotFoundError: the charm file wasn't found - """ - if self._charmfile is None: - try: - header = f"{self.app_name}_" - charm_name = header + "*.charm" - potentials = chain( - map(Path, charm_files), # Look in pytest arguments - Path().glob(charm_name), # Look in top-level path - self.path.glob(charm_name), # Look in charm-level path - ) - arch_choices = filter(lambda s: self.arch in str(s), potentials) - self._charmfile, *_ = filter(lambda s: s.name.startswith(header), arch_choices) - log.info("For %s found charmfile %s", self.app_name, self._charmfile) - except ValueError: - log.warning("No pre-built charm is available, let's build it") - if self._charmfile is None: - log.info("For %s build charmfile", self.app_name) - self._charmfile = await self.ops_test.build_charm(self.path) - if self._charmfile is None: - raise FileNotFoundError(f"{self.app_name}_*.charm not found") - return self._charmfile.resolve() - - -@dataclass -class Bundle: - """Represents test bundle. - - Attrs: - ops_test: Instance of the pytest-operator plugin - path: Path to the bundle file - content: Loaded content from the path - arch: Cloud Architecture - render: Path to a rendered bundle - applications: Mapping of applications in the bundle. - """ - - ops_test: OpsTest - path: Path - arch: str - _content: Mapping = field(default_factory=dict) - - @classmethod - async def create(cls, ops_test: OpsTest, path: Path) -> "Bundle": - """Create a bundle object. 
- - Args: - ops_test: Instance of the pytest-operator plugin - path: Path to the bundle file - - Returns: - Bundle: Instance of the Bundle - """ - arch = await cloud_arch(ops_test) - _type, _vms = await cloud_type(ops_test) - bundle = cls(ops_test, path, arch) - if _type == "lxd" and not _vms: - log.info("Drop lxd machine constraints") - bundle.drop_constraints() - if _type == "lxd" and _vms: - log.info("Constrain lxd machines with virt-type: virtual-machine") - bundle.add_constraints({"virt-type": "virtual-machine"}) - return bundle - - @property - def content(self) -> Mapping: - """Yaml content of the bundle loaded into a dict""" - if not self._content: - loaded = yaml.safe_load(self.path.read_bytes()) - series = loaded.get("series", "focal") - for app in loaded["applications"].values(): - app["charm"] = Charm.craft_url(app["charm"], series=series, arch=self.arch) - self._content = loaded - return self._content - - @property - def applications(self) -> Mapping[str, dict]: - """Mapping of all available application in the bundle.""" - return self.content["applications"] - - @property - def render(self) -> Path: - """Path to written bundle config to be deployed.""" - self.add_constraints({"arch": self.arch}) - target = self.ops_test.tmp_path / "bundles" / self.path.name - target.parent.mkdir(exist_ok=True, parents=True) - yaml.safe_dump(self.content, target.open("w")) - return target - - def switch(self, name: str, path: Optional[Path] = None, channel: Optional[str] = None): - """Replace charmhub application with a local charm path or specific channel. 
- - Args: - name (str): Which application - path (Path): Optional path to local charm - channel (str): Optional channel to use - - Raises: - ValueError: if both path and channel are provided, or neither are provided - """ - app = self.applications.get(name) - if not app: - return # Skip if the application is not in the bundle - if (not path and not channel) or (path and channel): - raise ValueError("channel and path are mutually exclusive") - if path: - app["charm"] = str(path.resolve()) - app["channel"] = None - app["resources"] = DEFAULT_RESOURCES - if channel: - app["charm"] = name - app["channel"] = channel - - def drop_constraints(self): - """Remove constraints on applications. Useful for testing on lxd.""" - for app in self.applications.values(): - app["constraints"] = "" - - def add_constraints(self, constraints: Dict[str, str]): - """Add constraints to applications. - - Args: - constraints: Mapping of constraints to add to applications. - """ - for app in self.applications.values(): - if app.get("num_units", 0) < 1: - log.info("Skipping constraints for subordinate charm: %s", app["charm"]) - continue - val: str = app.get("constraints", "") - existing = dict(kv.split("=", 1) for kv in val.split()) - existing.update(constraints) - app["constraints"] = " ".join(f"{k}={v}" for k, v in existing.items()) - - -async def cloud_arch(ops_test: OpsTest) -> str: - """Return current architecture of the selected controller - - Args: - ops_test (OpsTest): ops_test plugin - - Returns: - string describing current architecture of the underlying cloud - """ - assert ops_test.model, "Model must be present" - controller = await ops_test.model.get_controller() - controller_model = await controller.get_model("controller") - arch = set( - machine.safe_data["hardware-characteristics"]["arch"] - for machine in controller_model.machines.values() - ) - return arch.pop() - - -async def cloud_type(ops_test: OpsTest) -> Tuple[str, bool]: - """Return current cloud type of the selected 
controller - - Args: - ops_test (OpsTest): ops_test plugin - - Returns: - Tuple: - string describing current type of the underlying cloud - bool describing if VMs are enabled - """ - assert ops_test.model, "Model must be present" - controller = await ops_test.model.get_controller() - cloud = await controller.cloud() - _type = cloud.cloud.type_ - vms = True # Assume VMs are enabled - if _type == "lxd": - vms = not ops_test.request.config.getoption("--lxd-containers") - return _type, vms - - async def cloud_proxied(ops_test: OpsTest): """Setup a cloud proxy settings if necessary @@ -354,7 +131,6 @@ async def cloud_profile(ops_test: OpsTest): @contextlib.asynccontextmanager async def deploy_model( - request: pytest.FixtureRequest, ops_test: OpsTest, model_name: str, bundle: Bundle, @@ -362,7 +138,6 @@ async def deploy_model( """Add a juju model, deploy apps into it, wait for them to be active. Args: - request: handle to pytest requests from calling fixture ops_test: Instance of the pytest-operator plugin model_name: name of the model in which to deploy bundle: Bundle object to deploy or redeploy into the model @@ -371,8 +146,8 @@ async def deploy_model( model object """ config: Optional[dict] = {} - if request.config.option.model_config: - config = ops_test.read_model_config(request.config.option.model_config) + if ops_test.request.config.option.model_config: + config = ops_test.read_model_config(ops_test.request.config.option.model_config) credential_name = ops_test.cloud_name if model_name not in ops_test.models: await ops_test.track_model( @@ -384,7 +159,8 @@ async def deploy_model( with ops_test.model_context(model_name) as the_model: await cloud_profile(ops_test) async with ops_test.fast_forward("60s"): - await the_model.deploy(bundle.render) + bundle_yaml = bundle.render(ops_test.tmp_path) + await the_model.deploy(bundle_yaml) await the_model.wait_for_idle( apps=list(bundle.applications), status="active", @@ -396,85 +172,27 @@ async def deploy_model( 
log.fatal("Failed to determine model: model_name=%s", model_name) -def bundle_file(request) -> Path: - """Helper to get bundle file. - - Args: - request: pytest request object - - Returns: - path to test's bundle file - """ - _file = "test-bundle.yaml" - bundle_marker = request.node.get_closest_marker("bundle_file") - if bundle_marker: - _file = bundle_marker.args[0] - return Path(__file__).parent / "data" / _file - - @pytest_asyncio.fixture(scope="module") async def kubernetes_cluster(request: pytest.FixtureRequest, ops_test: OpsTest): - """Deploy local kubernetes charms.""" - bundle_path = bundle_file(request) + """Deploy kubernetes charms according to the bundle_marker.""" model = "main" + bundle, markings = await Bundle.create(ops_test) with ops_test.model_context(model) as the_model: - if await is_deployed(the_model, bundle_path): + if await is_deployed(the_model, bundle.path): log.info("Using existing model.") yield ops_test.model return - log.info("Deploying cluster using %s bundle.", bundle_path) - - bundle = await Bundle.create(ops_test, bundle_path) + log.info("Deploying new cluster using %s bundle.", bundle.path) if request.config.option.apply_proxy: await cloud_proxied(ops_test) - charms = [Charm(ops_test, bundle.arch, Path("charms") / p) for p in ("worker/k8s", "worker")] - charm_files_args = request.config.option.charm_files - DEFAULT_RESOURCES["snap-installation"] = request.config.option.snap_installation_resource - charm_files = await asyncio.gather(*[charm.resolve(charm_files_args) for charm in charms]) - switch_to_path = {} - for path, charm in zip(charm_files, charms): - if upgrade_channel := request.config.option.upgrade_from: - bundle.switch(charm.app_name, channel=upgrade_channel) - switch_to_path[charm.app_name] = path - else: - bundle.switch(charm.app_name, path=path) - - async with deploy_model(request, ops_test, model, bundle) as the_model: - await upgrade_model(the_model, switch_to_path) + await bundle.apply_marking(ops_test, markings) + 
async with deploy_model(ops_test, model, bundle) as the_model: yield the_model -async def upgrade_model(model: Model, switch_to_path: dict[str, Path]): - """Upgrade the model with the provided charms. - - Args: - model: Juju model - switch_to_path: Mapping of app_name to charm - - """ - if not switch_to_path: - return - - async def _refresh(app_name: str): - """Refresh the application. - - Args: - app_name: Name of the application to refresh - """ - app: Application = model.applications[app_name] - await app.refresh(path=switch_to_path[app_name], resources=DEFAULT_RESOURCES) - - await asyncio.gather(*[_refresh(app) for app in switch_to_path]) - await model.wait_for_idle( - apps=list(switch_to_path.keys()), - status="active", - timeout=30 * 60, - ) - - @pytest_asyncio.fixture(name="_grafana_agent", scope="module") async def grafana_agent(kubernetes_cluster: Model): """Deploy Grafana Agent.""" diff --git a/tests/integration/data/test-bundle-ceph.yaml b/tests/integration/data/test-bundle-ceph.yaml index 4f93361e..dc015b6f 100644 --- a/tests/integration/data/test-bundle-ceph.yaml +++ b/tests/integration/data/test-bundle-ceph.yaml @@ -8,7 +8,6 @@ series: jammy applications: k8s: charm: k8s - channel: latest/edge constraints: cores=2 mem=8G root-disk=16G num_units: 1 ceph-csi: diff --git a/tests/integration/data/test-bundle-etcd.yaml b/tests/integration/data/test-bundle-etcd.yaml index 662c984f..42dbdf62 100644 --- a/tests/integration/data/test-bundle-etcd.yaml +++ b/tests/integration/data/test-bundle-etcd.yaml @@ -18,7 +18,6 @@ applications: num_units: 1 k8s: charm: k8s - channel: latest/edge num_units: 1 constraints: cores=2 mem=8G root-disk=16G options: @@ -26,7 +25,6 @@ applications: bootstrap-node-taints: "node-role.kubernetes.io/control-plane=:NoSchedule" k8s-worker: charm: k8s-worker - channel: latest/edge constraints: cores=2 mem=8G root-disk=16G num_units: 1 relations: diff --git a/tests/integration/data/test-bundle.yaml b/tests/integration/data/test-bundle.yaml 
index b0fe4a0f..ca38a19a 100644 --- a/tests/integration/data/test-bundle.yaml +++ b/tests/integration/data/test-bundle.yaml @@ -8,7 +8,6 @@ series: focal applications: k8s: charm: k8s - channel: latest/edge num_units: 3 constraints: cores=2 mem=8G root-disk=16G expose: true @@ -21,7 +20,6 @@ applications: kubelet-extra-args: "v=3" k8s-worker: charm: k8s-worker - channel: latest/edge num_units: 2 constraints: cores=2 mem=8G root-disk=16G options: diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index dae1b2ba..97819d00 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -4,19 +4,26 @@ # pylint: disable=too-many-arguments,too-many-positional-arguments +import asyncio import ipaddress import json import logging +import re import shlex +from dataclasses import dataclass, field +from functools import cache, cached_property +from itertools import chain from pathlib import Path -from typing import List +from typing import Dict, List, Mapping, Optional, Sequence, Tuple import yaml from juju import unit from juju.model import Model +from pytest_operator.plugin import OpsTest from tenacity import AsyncRetrying, before_sleep_log, retry, stop_after_attempt, wait_fixed log = logging.getLogger(__name__) +CHARMCRAFT_DIRS = {"k8s": Path("charms/worker/k8s"), "k8s-worker": Path("charms/worker")} async def is_deployed(model: Model, bundle_path: Path) -> bool: @@ -193,3 +200,388 @@ async def get_pod_logs( result = await action.wait() assert result.results["return-code"] == 0, f"Failed to retrieve pod {name} logs." return result.results["stdout"] + + +async def get_leader(app) -> int: + """Find leader unit of an application. 
+ + Args: + app: Juju application + + Returns: + int: index to leader unit + + Raises: + ValueError: No leader found + """ + is_leader = await asyncio.gather(*(u.is_leader_from_status() for u in app.units)) + for idx, flag in enumerate(is_leader): + if flag: + return idx + raise ValueError("No leader found") + + +@dataclass +class Markings: + """Test markings for the bundle. + + Attrs: + apps_local: List of application names needing local files to replace charm urls + apps_channel: Mapping of application names to channels + apps_resources: Mapping of application names to resources + """ + + apps_local: List[str] = field(default_factory=list) + apps_channel: Mapping = field(default_factory=dict) + apps_resources: Mapping = field(default_factory=dict) + + +@dataclass +class CharmUrl: + """Represents a charm URL. + + Attrs: + name: Name of the charm in the store + series: Cloud series + arch: Cloud architecture + """ + + name: str + series: str + arch: str + _URL_RE = re.compile(r"ch:(?P\w+)/(?P\w+)/(?P.+)") + + @classmethod + def craft(cls, name: str, series: str, arch: str) -> "CharmUrl": + """Parse a charm URL. + + Args: + name: Name or URL of the charm + series: Cloud series + arch: Cloud architecture + + Returns: + CharmUrl object + """ + if m := cls._URL_RE.match(name): + name = m.group("charm") + return cls(name, series, arch) + + def __str__(self) -> str: + """Return the charm URL. + + Returns: + string: charm URL + """ + return f"ch:{self.arch}/{self.series}/{self.name}" + + +@dataclass +class Charm: + """Represents source charms in this repository. 
+ + Attrs: + path: Path to the charmcraft file + metadata: Charm's metadata + name: Name of the charm from the metadata + local_path: Path to the built charm file + """ + + path: Path + _charmfile: Optional[Path] = None + + @cached_property + def metadata(self) -> dict: + """Charm Metadata.""" + return yaml.safe_load((self.path / "charmcraft.yaml").read_text()) + + @property + def name(self) -> str: + """Name defined by the charm.""" + return self.metadata["name"] + + @property + def local_path(self) -> Path: + """Local path to the charm. + + Returns: + Path to the built charm file + Raises: + FileNotFoundError: the charm file wasn't found + """ + if self._charmfile is None: + raise FileNotFoundError(f"{self.name}_*.charm not found") + return self._charmfile + + @classmethod + @cache + def find(cls, name: str) -> Optional["Charm"]: + """Find a charm by name. + + Args: + name: Name of the charm + + Returns: + Charm object or None + """ + if charmcraft := CHARMCRAFT_DIRS.get(name): + return cls(charmcraft) + + async def resolve(self, ops_test: OpsTest, arch: str) -> "Charm": + """Build or find the charm with ops_test. 
+ + Args: + ops_test: Instance of the pytest-operator plugin + arch (str): Cloud architecture + + Return: + self (Charm): the resolved charm + + Raises: + FileNotFoundError: the charm file wasn't found + """ + prefix = f"{self.name}_" + if self._charmfile is None: + charm_files = ops_test.request.config.option.charm_files or [] + try: + charm_name = prefix + "*.charm" + potentials = chain( + map(Path, charm_files), # Look in pytest arguments + Path().glob(charm_name), # Look in top-level path + self.path.glob(charm_name), # Look in charm-level path + ) + arch_choices = filter(lambda s: arch in str(s), potentials) + self._charmfile, *_ = filter(lambda s: s.name.startswith(prefix), arch_choices) + log.info("For %s found charmfile %s", self.name, self._charmfile) + except ValueError: + log.warning("No pre-built charm is available, let's build it") + if self._charmfile is None: + log.info("For %s build charmfile", self.name) + self._charmfile = await ops_test.build_charm(self.path) + if self._charmfile is None: + raise FileNotFoundError(f"{prefix}*.charm not found") + return self + + +@dataclass +class Bundle: + """Represents a test bundle. + + Attrs: + path: Path to the bundle file + arch: Cloud Architecture + content: Loaded content from the path + applications: Mapping of applications in the bundle. + """ + + path: Path + arch: str + _content: Mapping = field(default_factory=dict) + + @classmethod + async def create(cls, ops_test) -> Tuple["Bundle", Markings]: + """Craft a bundle for the given ops_test environment. 
+ + Args: + ops_test: Instance of the pytest-operator plugin + + Returns: + Bundle object for the test + Markings from the test + """ + bundle_marker = ops_test.request.node.get_closest_marker("bundle") + assert bundle_marker, "No bundle marker found" + kwargs = {**bundle_marker.kwargs} + + if val := kwargs.pop("file", None): + path = Path(__file__).parent / "data" / val + else: + log.warning("No file specified, using default test-bundle.yaml") + path = Path(__file__).parent / "data" / "test-bundle.yaml" + + arch = await cloud_arch(ops_test) + assert arch, "Architecture must be known before customizing the bundle" + + bundle = cls(path=path, arch=arch) + bundle.add_constraints({"arch": arch}) + + assert not all( + _ in kwargs for _ in ("apps_local", "apps_channel") + ), "Cannot use both apps_local and apps_channel" + + return bundle, Markings(**kwargs) + + @property + def content(self) -> Mapping: + """Yaml content of the bundle loaded into a dict + + Returns: + Mapping: bundle content + """ + if not self._content: + loaded = yaml.safe_load(self.path.read_bytes()) + series = loaded.get("series", "focal") + for app in loaded["applications"].values(): + app["charm"] = CharmUrl(app["charm"], series=series, arch=self.arch) + self._content = loaded + return self._content + + @property + def applications(self) -> Mapping[str, dict]: + """Mapping of all available application in the bundle. + + Returns: + Mapping: application name to application details + """ + return self.content["applications"] + + async def discover_charm_files(self, ops_test: OpsTest) -> Dict[str, Charm]: + """Discover charm files for the applications in the bundle. 
+ + Args: + ops_test: Instance of the pytest-operator plugin + arch: Cloud architecture + + Returns: + Mapping: application name to Charm object + """ + app_to_charm = {} + for app in self.applications.values(): + if charm := Charm.find(app["charm"].name): + await charm.resolve(ops_test, self.arch) + app_to_charm[charm.name] = charm + return app_to_charm + + async def apply_marking(self, ops_test: OpsTest, markings: Markings): + """Customize the bundle for the test. + + Args: + ops_test: Instance of the pytest-operator plugin + """ + _type, _vms = await cloud_type(ops_test) + if _type == "lxd" and not _vms: + log.info("Drop lxd machine constraints") + self.drop_constraints() + if _type == "lxd" and _vms: + log.info("Constrain lxd machines with virt-type: virtual-machine") + self.add_constraints({"virt-type": "virtual-machine"}) + + charms = await self.discover_charm_files(ops_test) + + empty_resource = { + "snap-installation": ops_test.request.config.option.snap_installation_resource + } + for app in markings.apps_local: + assert app in charms, f"App={app} doesn't have a local charm" + rsc = markings.apps_resources.get(app) or empty_resource + self.switch(app, charm=charms[app], channel=None, resources=rsc) + + for app, channel in markings.apps_channel.items(): + rsc = markings.apps_resources.get(app) + self.switch(app, charm=charms[app], channel=channel, resources=rsc) + + def switch( + self, + name: str, + charm: Charm, + channel: Optional[str] = None, + resources: Optional[dict] = None, + ): + """Replace charmhub application with a local path or specific channel. 
+ + Args: + name (str): Which application + charm (Charm): Which charm to use + channel (Optional[str]): If specified use channel, otherwise use local path + resources (dict): Optional resources to add + + Raises: + ValueError: if both path and channel are provided, or neither are provided + """ + app = self.applications.get(name) + if not app: + return # Skip if the application is not in the bundle + if not charm.local_path and not channel: + raise FileNotFoundError(f"Charm={charm.name} for App={app} not found") + if channel: + app["charm"] = charm.name + app["channel"] = channel + else: + app["charm"] = str(charm.local_path.resolve()) + app["channel"] = None + if resources: + app["resources"] = resources + + def drop_constraints(self): + """Remove constraints on applications. Useful for testing on lxd.""" + for app in self.applications.values(): + app["constraints"] = "" + + def add_constraints(self, constraints: Dict[str, str]): + """Add constraints to applications. + + Args: + constraints: Mapping of constraints to add to applications. + """ + for app in self.applications.values(): + if app.get("num_units", 0) < 1: + log.info("Skipping constraints for subordinate charm: %s", app["charm"]) + continue + val: str = app.get("constraints", "") + existing = dict(kv.split("=", 1) for kv in val.split()) + existing.update(constraints) + app["constraints"] = " ".join(f"{k}={v}" for k, v in existing.items()) + + def render(self, tmp_path: Path) -> Path: + """Path to written bundle config to be deployed. 
+ + Args: + tmp_path: temporary path to write the bundle + + Returns: + Path to the written bundle + """ + target = tmp_path / "bundles" / self.path.name + target.parent.mkdir(exist_ok=True, parents=True) + yaml.safe_dump(self.content, target.open("w")) + return target + + +async def cloud_arch(ops_test: OpsTest) -> str: + """Return current architecture of the selected controller + + Args: + ops_test (OpsTest): ops_test plugin + + Returns: + string describing current architecture of the underlying cloud + """ + assert ops_test.model, "Model must be present" + controller = await ops_test.model.get_controller() + controller_model = await controller.get_model("controller") + arch = set( + machine.safe_data["hardware-characteristics"]["arch"] + for machine in controller_model.machines.values() + ) + return arch.pop() + + +async def cloud_type(ops_test: OpsTest) -> Tuple[str, bool]: + """Return current cloud type of the selected controller + + Args: + ops_test (OpsTest): ops_test plugin + + Returns: + Tuple: + string describing current type of the underlying cloud + bool describing if VMs are enabled + """ + assert ops_test.model, "Model must be present" + controller = await ops_test.model.get_controller() + cloud = await controller.cloud() + _type = cloud.cloud.type_ + vms = True # Assume VMs are enabled + if _type == "lxd": + vms = not ops_test.request.config.getoption("--lxd-containers") + return _type, vms diff --git a/tests/integration/test_ceph.py b/tests/integration/test_ceph.py index ae1a4732..02caa7a6 100644 --- a/tests/integration/test_ceph.py +++ b/tests/integration/test_ceph.py @@ -15,7 +15,7 @@ # This pytest mark configures the test environment to use the Canonical Kubernetes # bundle with ceph, for all the test within this module. 
-pytestmark = [pytest.mark.bundle_file("test-bundle-ceph.yaml")] +pytestmark = [pytest.mark.bundle(file="test-bundle-ceph.yaml", apps_local=["k8s"])] def _get_data_file_path(name) -> str: diff --git a/tests/integration/test_etcd.py b/tests/integration/test_etcd.py index 9708a93d..800af23a 100644 --- a/tests/integration/test_etcd.py +++ b/tests/integration/test_etcd.py @@ -14,9 +14,7 @@ # This pytest mark configures the test environment to use the Canonical Kubernetes # bundle with etcd, for all the test within this module. -pytestmark = [ - pytest.mark.bundle_file("test-bundle-etcd.yaml"), -] +pytestmark = [pytest.mark.bundle(file="test-bundle-etcd.yaml", apps_local=["k8s", "k8s-worker"])] @pytest.mark.abort_on_fail diff --git a/tests/integration/test_k8s.py b/tests/integration/test_k8s.py index 13f73726..81f6de98 100644 --- a/tests/integration/test_k8s.py +++ b/tests/integration/test_k8s.py @@ -15,29 +15,15 @@ from tenacity import retry, stop_after_attempt, wait_fixed from .grafana import Grafana -from .helpers import get_nodes, ready_nodes +from .helpers import get_leader, get_nodes, ready_nodes from .prometheus import Prometheus log = logging.getLogger(__name__) -async def get_leader(app) -> int: - """Find leader unit of an application. - - Args: - app: Juju application - - Returns: - int: index to leader unit - - Raises: - ValueError: No leader found - """ - is_leader = await asyncio.gather(*(u.is_leader_from_status() for u in app.units)) - for idx, flag in enumerate(is_leader): - if flag: - return idx - raise ValueError("No leader found") +pytestmark = [ + pytest.mark.bundle(file="test-bundle.yaml", apps_local=["k8s", "k8s-worker"]), +] @pytest.mark.abort_on_fail diff --git a/tests/integration/test_upgrade.py b/tests/integration/test_upgrade.py new file mode 100644 index 00000000..1d22c9e1 --- /dev/null +++ b/tests/integration/test_upgrade.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python3 + +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +"""Upgrade Integration tests.""" + +import logging +from typing import Optional + +import juju.application +import juju.model +import juju.unit +import pytest +import yaml +from pytest_operator.plugin import OpsTest + +from .helpers import Bundle, Charm, get_leader + +# This pytest mark configures the test environment to use the Canonical Kubernetes +# deploying charms from the edge channels, then upgrading them to the built charm. +pytestmark = [ + pytest.mark.bundle( + file="test-bundle.yaml", apps_channel={"k8s": "edge", "k8s-worker": "edge"} + ), +] + + +log = logging.getLogger(__name__) + + +@pytest.mark.abort_on_fail +async def test_k8s_upgrade(kubernetes_cluster: juju.model.Model, ops_test: OpsTest): + """Upgrade the model with the provided charms. + + Args: + kubernetes_cluster: The kubernetes model + ops_test: The test harness + request: The request object + """ + local_resources = { + "snap-installation": ops_test.request.config.option.snap_installation_resource + } + bundle, _ = await Bundle.create(ops_test) + charms = await bundle.discover_charm_files(ops_test) + + async def _refresh(app_name: str): + """Refresh the application. + + Args: + app_name: Name of the application to refresh + """ + app: Optional[juju.application.Application] = kubernetes_cluster.applications[app_name] + assert app is not None, f"Application {app_name} not found" + + log.info(f"Refreshing {app_name}") + leader_idx: int = await get_leader(app) + leader: juju.unit.Unit = app.units[leader_idx] + action = await leader.run_action("pre-upgrade-check") + await action.wait() + with_fault = f"Pre-upgrade of {app_name} failed with {yaml.safe_dump(action.results)}" + if app_name == "k8s": + # The k8s charm has a pre-upgrade-check action that works, k8s-worker does not. 
+ assert action.status == "completed", with_fault + assert action.results["return-code"] == 0, with_fault + await app.refresh(path=charms[app_name].local_path, resources=local_resources) + await kubernetes_cluster.wait_for_idle( + apps=list(charms.keys()), + status="active", + timeout=30 * 60, + ) + + for app in charms: + await _refresh(app) From 963c140964dac97d9578ae5b291e366947d15c93 Mon Sep 17 00:00:00 2001 From: Adam Dyess Date: Mon, 16 Dec 2024 09:37:28 -0600 Subject: [PATCH 02/11] Ignore pre-upgrade-check on k8s-workers --- charms/worker/k8s/src/inspector.py | 3 ++- charms/worker/k8s/src/upgrade.py | 7 ++++--- charms/worker/k8s/tests/unit/test_upgrade.py | 15 +++++++-------- tests/integration/conftest.py | 7 ++++--- tests/integration/helpers.py | 7 ++++--- tests/integration/test_upgrade.py | 4 ++-- 6 files changed, 23 insertions(+), 20 deletions(-) diff --git a/charms/worker/k8s/src/inspector.py b/charms/worker/k8s/src/inspector.py index 783b5ea1..1a73b4de 100644 --- a/charms/worker/k8s/src/inspector.py +++ b/charms/worker/k8s/src/inspector.py @@ -44,7 +44,7 @@ def _get_client(self) -> Client: self.client = Client(config=config.get()) return self.client - def get_nodes(self, labels: LabelSelector) -> Optional[List[Node]]: + def get_nodes(self, labels: Optional[LabelSelector] = None) -> Optional[List[Node]]: """Get nodes from the cluster. Args: @@ -56,6 +56,7 @@ def get_nodes(self, labels: LabelSelector) -> Optional[List[Node]]: Raises: ClusterInspectorError: If the nodes cannot be retrieved. """ + labels = labels or {} client = self._get_client() try: diff --git a/charms/worker/k8s/src/upgrade.py b/charms/worker/k8s/src/upgrade.py index f85e5122..520a1f7a 100644 --- a/charms/worker/k8s/src/upgrade.py +++ b/charms/worker/k8s/src/upgrade.py @@ -89,10 +89,11 @@ def pre_upgrade_check(self) -> None: Raises: ClusterNotReadyError: If the cluster is not ready for an upgrade. 
""" + if self.charm.is_worker: + log.info("TODO: Find some pre-upgrade checks for worker application.") + return try: - nodes = self.cluster_inspector.get_nodes( - labels={"juju-charm": "k8s-worker" if self.charm.is_worker else "k8s"} - ) + nodes = self.cluster_inspector.get_nodes() failing_pods = self.cluster_inspector.verify_pods_running(["kube-system"]) except ClusterInspector.ClusterInspectorError as e: raise ClusterNotReadyError( diff --git a/charms/worker/k8s/tests/unit/test_upgrade.py b/charms/worker/k8s/tests/unit/test_upgrade.py index 8887cfdf..3c5ddaac 100644 --- a/charms/worker/k8s/tests/unit/test_upgrade.py +++ b/charms/worker/k8s/tests/unit/test_upgrade.py @@ -21,6 +21,7 @@ class TestK8sUpgrade(unittest.TestCase): def setUp(self): """Set up common test fixtures.""" self.charm = MagicMock() + self.charm.is_worker = False self.node_manager = MagicMock(spec=ClusterInspector) self.upgrade = K8sUpgrade( self.charm, @@ -53,8 +54,8 @@ def test_pre_upgrade_check_worker_success(self): self.upgrade.pre_upgrade_check() - self.node_manager.get_nodes.assert_called_once_with(labels={"juju-charm": "k8s-worker"}) - self.node_manager.verify_pods_running.assert_called_once_with(["kube-system"]) + self.node_manager.get_nodes.assert_not_called() + self.node_manager.verify_pods_running.assert_not_called() def test_pre_upgrade_check_control_plane_success(self): """Test pre_upgrade_check succeeds for control plane nodes.""" @@ -64,15 +65,14 @@ def test_pre_upgrade_check_control_plane_success(self): self.upgrade.pre_upgrade_check() - self.node_manager.get_nodes.assert_called_once_with(labels={"juju-charm": "k8s"}) + self.node_manager.get_nodes.assert_called_once_with() def test_pre_upgrade_check_unready_nodes(self): """Test pre_upgrade_check fails when nodes are not ready.""" - self.charm.is_worker = True self.node_manager.get_nodes.return_value = [ - Node(metadata=ObjectMeta(name="worker-1")), - Node(metadata=ObjectMeta(name="worker-2")), - 
Node(metadata=ObjectMeta(name="worker-3")), + Node(metadata=ObjectMeta(name="k8s-1")), + Node(metadata=ObjectMeta(name="k8s-2")), + Node(metadata=ObjectMeta(name="k8s-3")), ] with self.assertRaises(ClusterNotReadyError): @@ -89,7 +89,6 @@ def test_pre_upgrade_check_cluster_inspector_error(self): def test_pre_upgrade_check_pods_not_ready(self): """Test pre_upgrade_check fails when pods are not ready.""" - self.charm.is_worker = True self.node_manager.get_nodes.return_value = None self.node_manager.verify_pods_running.return_value = "kube-system/pod-1" diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index 9e269810..c55b9922 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -20,7 +20,7 @@ from pytest_operator.plugin import OpsTest from .cos_substrate import LXDSubstrate -from .helpers import Bundle, Charm, cloud_type, get_unit_cidrs, is_deployed +from .helpers import Bundle, CharmUrl, cloud_type, get_unit_cidrs, is_deployed log = logging.getLogger(__name__) TEST_DATA = Path(__file__).parent / "data" @@ -72,7 +72,8 @@ def pytest_configure(config): config.addinivalue_line("markers", "cos: mark COS integration tests.") config.addinivalue_line( "markers", - "bundle(file='', apps_local={}, apps_channel={}, apps_resources={}): specify a YAML bundle file for a test.", + "bundle(file='', apps_local={}, apps_channel={}, apps_resources={}): " + "specify a YAML bundle file for a test.", ) @@ -202,7 +203,7 @@ async def grafana_agent(kubernetes_cluster: Model): machine_series = juju.utils.get_version_series(data["base"].split("@")[1]) await kubernetes_cluster.deploy( - Charm.craft_url("grafana-agent", machine_series, machine_arch), + str(CharmUrl.craft("grafana-agent", machine_series, machine_arch)), channel="stable", series=machine_series, ) diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index 97819d00..03a22054 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -14,7 
+14,7 @@ from functools import cache, cached_property from itertools import chain from pathlib import Path -from typing import Dict, List, Mapping, Optional, Sequence, Tuple +from typing import Dict, List, Mapping, Optional, Tuple import yaml from juju import unit @@ -326,6 +326,7 @@ def find(cls, name: str) -> Optional["Charm"]: """ if charmcraft := CHARMCRAFT_DIRS.get(name): return cls(charmcraft) + return None async def resolve(self, ops_test: OpsTest, arch: str) -> "Charm": """Build or find the charm with ops_test. @@ -440,7 +441,6 @@ async def discover_charm_files(self, ops_test: OpsTest) -> Dict[str, Charm]: Args: ops_test: Instance of the pytest-operator plugin - arch: Cloud architecture Returns: Mapping: application name to Charm object @@ -457,6 +457,7 @@ async def apply_marking(self, ops_test: OpsTest, markings: Markings): Args: ops_test: Instance of the pytest-operator plugin + markings: Markings from the test """ _type, _vms = await cloud_type(ops_test) if _type == "lxd" and not _vms: @@ -496,7 +497,7 @@ def switch( resources (dict): Optional resources to add Raises: - ValueError: if both path and channel are provided, or neither are provided + FileNotFoundError: if the local charm file is not found """ app = self.applications.get(name) if not app: diff --git a/tests/integration/test_upgrade.py b/tests/integration/test_upgrade.py index 1d22c9e1..55f5ddb8 100644 --- a/tests/integration/test_upgrade.py +++ b/tests/integration/test_upgrade.py @@ -15,7 +15,7 @@ import yaml from pytest_operator.plugin import OpsTest -from .helpers import Bundle, Charm, get_leader +from .helpers import Bundle, get_leader # This pytest mark configures the test environment to use the Canonical Kubernetes # deploying charms from the edge channels, then upgrading them to the built charm. 
@@ -53,7 +53,7 @@ async def _refresh(app_name: str): app: Optional[juju.application.Application] = kubernetes_cluster.applications[app_name] assert app is not None, f"Application {app_name} not found" - log.info(f"Refreshing {app_name}") + log.info("Refreshing %s", app_name) leader_idx: int = await get_leader(app) leader: juju.unit.Unit = app.units[leader_idx] action = await leader.run_action("pre-upgrade-check") From 3d58015fdc7aa93bbd109155a9b593765029f823 Mon Sep 17 00:00:00 2001 From: Adam Dyess Date: Mon, 16 Dec 2024 12:40:14 -0600 Subject: [PATCH 03/11] Render CharmURL to yaml --- tests/integration/helpers.py | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index 03a22054..87cb2664 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -14,7 +14,7 @@ from functools import cache, cached_property from itertools import chain from pathlib import Path -from typing import Dict, List, Mapping, Optional, Tuple +from typing import Dict, List, Mapping, Optional, Set, Tuple import yaml from juju import unit @@ -267,13 +267,19 @@ def craft(cls, name: str, series: str, arch: str) -> "CharmUrl": name = m.group("charm") return cls(name, series, arch) - def __str__(self) -> str: - """Return the charm URL. + @staticmethod + def representer(dumper: yaml.Dumper, data: "CharmUrl") -> yaml.ScalarNode: + """Yaml representer for the CharmUrl object. 
+ + Args: + dumper: yaml dumper + data: CharmUrl object Returns: - string: charm URL + yaml.ScalarNode: yaml node """ - return f"ch:{self.arch}/{self.series}/{self.name}" + as_str = f"ch:{data.arch}/{data.series}/{data.name}" + return dumper.represent_scalar("tag:yaml.org,2002:str", as_str) @dataclass @@ -423,7 +429,7 @@ def content(self) -> Mapping: loaded = yaml.safe_load(self.path.read_bytes()) series = loaded.get("series", "focal") for app in loaded["applications"].values(): - app["charm"] = CharmUrl(app["charm"], series=series, arch=self.arch) + app["charm"] = CharmUrl.craft(app["charm"], series=series, arch=self.arch) self._content = loaded return self._content @@ -544,7 +550,7 @@ def render(self, tmp_path: Path) -> Path: """ target = tmp_path / "bundles" / self.path.name target.parent.mkdir(exist_ok=True, parents=True) - yaml.safe_dump(self.content, target.open("w")) + yaml.dump(self.content, target.open("w")) return target @@ -560,11 +566,11 @@ async def cloud_arch(ops_test: OpsTest) -> str: assert ops_test.model, "Model must be present" controller = await ops_test.model.get_controller() controller_model = await controller.get_model("controller") - arch = set( + arch: Set[str] = { machine.safe_data["hardware-characteristics"]["arch"] for machine in controller_model.machines.values() - ) - return arch.pop() + } + return arch.pop().strip() async def cloud_type(ops_test: OpsTest) -> Tuple[str, bool]: @@ -586,3 +592,6 @@ async def cloud_type(ops_test: OpsTest) -> Tuple[str, bool]: if _type == "lxd": vms = not ops_test.request.config.getoption("--lxd-containers") return _type, vms + + +yaml.add_representer(CharmUrl, CharmUrl.representer) From bf57559463601efdafe37e80a8bc05e2390bf7e7 Mon Sep 17 00:00:00 2001 From: Adam Dyess Date: Mon, 16 Dec 2024 13:28:24 -0600 Subject: [PATCH 04/11] Maintain arch constraints before rendering --- tests/integration/helpers.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/integration/helpers.py 
b/tests/integration/helpers.py index 87cb2664..964e889b 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -410,8 +410,6 @@ async def create(cls, ops_test) -> Tuple["Bundle", Markings]: assert arch, "Architecture must be known before customizing the bundle" bundle = cls(path=path, arch=arch) - bundle.add_constraints({"arch": arch}) - assert not all( _ in kwargs for _ in ("apps_local", "apps_channel") ), "Cannot use both apps_local and apps_channel" @@ -548,6 +546,7 @@ def render(self, tmp_path: Path) -> Path: Returns: Path to the written bundle """ + self.add_constraints({"arch": self.arch}) target = tmp_path / "bundles" / self.path.name target.parent.mkdir(exist_ok=True, parents=True) yaml.dump(self.content, target.open("w")) From a9546413354efb452a83d12ded7c0c7ac49c314a Mon Sep 17 00:00:00 2001 From: Adam Dyess Date: Mon, 16 Dec 2024 15:01:56 -0600 Subject: [PATCH 05/11] Split upgrade to a different test runner --- .github/workflows/integration_test.yaml | 3 ++- tests/integration/test_upgrade.py | 2 +- tox.ini | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/integration_test.yaml b/.github/workflows/integration_test.yaml index 37f6f7d6..d7b7f1ca 100644 --- a/.github/workflows/integration_test.yaml +++ b/.github/workflows/integration_test.yaml @@ -41,9 +41,10 @@ jobs: arch: - {id: amd64, builder-label: ubuntu-22.04, tester-arch: AMD64} # built on azure - {id: arm64, builder-label: ARM64, tester-arch: ARM64} # built on self-hosted - suite: [k8s, etcd, ceph] + suite: [k8s, etcd, ceph, upgrade] exclude: - {arch: {id: arm64}, suite: ceph} + - {arch: {id: arm64}, suite: upgrade} with: identifier: ${{ matrix.arch.id }}-${{ matrix.suite }} builder-runner-label: ${{ matrix.arch.builder-label }} diff --git a/tests/integration/test_upgrade.py b/tests/integration/test_upgrade.py index 55f5ddb8..82c53e0f 100644 --- a/tests/integration/test_upgrade.py +++ b/tests/integration/test_upgrade.py @@ -30,7 +30,7 @@ 
@pytest.mark.abort_on_fail -async def test_k8s_upgrade(kubernetes_cluster: juju.model.Model, ops_test: OpsTest): +async def test_upgrade(kubernetes_cluster: juju.model.Model, ops_test: OpsTest): """Upgrade the model with the provided charms. Args: diff --git a/tox.ini b/tox.ini index 4ec8b7df..283da91b 100644 --- a/tox.ini +++ b/tox.ini @@ -84,7 +84,7 @@ deps = commands = bandit -c {toxinidir}/pyproject.toml -r {[vars]all_path} -[testenv:{integration,integration-k8s,integration-etcd,integration-ceph}] +[testenv:{integration,integration-k8s,integration-etcd,integration-ceph,integration-upgrade}] description = Run integration tests deps = -r test_requirements.txt commands = From 48d52df0382230a0bf7cbefed1960a553cddda3d Mon Sep 17 00:00:00 2001 From: Adam Dyess Date: Mon, 16 Dec 2024 16:10:10 -0600 Subject: [PATCH 06/11] Wait for stable kube-system before upgrading --- charms/worker/k8s/src/events/update_status.py | 14 ++++++++++ charms/worker/k8s/src/protocols.py | 3 +++ tests/integration/helpers.py | 27 ++++++++++++------- tests/integration/test_k8s.py | 6 ++--- tests/integration/test_upgrade.py | 19 +++++++++++-- 5 files changed, 54 insertions(+), 15 deletions(-) diff --git a/charms/worker/k8s/src/events/update_status.py b/charms/worker/k8s/src/events/update_status.py index 6a480846..7472a483 100644 --- a/charms/worker/k8s/src/events/update_status.py +++ b/charms/worker/k8s/src/events/update_status.py @@ -13,6 +13,7 @@ import charms.contextual_status as status import ops import reschedule +from inspector import ClusterInspector from protocols import K8sCharmProtocol from snap import version as snap_version from upgrade import K8sUpgrade @@ -107,4 +108,17 @@ def run(self): status.add(ops.WaitingStatus("Node not Ready")) trigger.create(reschedule.Period(seconds=30)) return + + if self.charm.is_control_plane: + inspect = self.charm.cluster_inspector + try: + if failing_pods := inspect.verify_pods_running(["kube-system"]): + status.add(ops.WaitingStatus(f"Unready 
kube-system Pods: {failing_pods}")) + except ClusterInspector.ClusterInspectorError as e: + log.exception("Failed to verify pods: %s", e) + status.add(ops.WaitingStatus("Waiting for API Server")) + finally: + trigger.create(reschedule.Period(seconds=30)) + return + trigger.cancel() diff --git a/charms/worker/k8s/src/protocols.py b/charms/worker/k8s/src/protocols.py index dbf97318..69d5a925 100644 --- a/charms/worker/k8s/src/protocols.py +++ b/charms/worker/k8s/src/protocols.py @@ -9,6 +9,7 @@ from charms.interface_external_cloud_provider import ExternalCloudProvider from charms.k8s.v0.k8sd_api_manager import K8sdAPIManager from charms.reconciler import Reconciler +from inspector import ClusterInspector from ops.interface_kube_control import KubeControlProvides @@ -17,6 +18,7 @@ class K8sCharmProtocol(ops.CharmBase): Attributes: api_manager (K8sdAPIManager): The API manager for the charm. + cluster_inspector (ClusterInspector): The cluster inspector for the charm. kube_control (KubeControlProvides): The kube-control interface. xcp (ExternalCloudProvider): The external cloud provider interface. 
reconciler (Reconciler): The reconciler for the charm
@@ -28,6 +30,7 @@ class K8sCharmProtocol(ops.CharmBase):
     """
 
     api_manager: K8sdAPIManager
+    cluster_inspector: ClusterInspector
     kube_control: KubeControlProvides
     xcp: ExternalCloudProvider
     reconciler: Reconciler
diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py
index 964e889b..84aa4361 100644
--- a/tests/integration/helpers.py
+++ b/tests/integration/helpers.py
@@ -87,22 +87,29 @@ async def get_unit_cidrs(model: Model, app_name: str, unit_num: int) -> List[str
     return list(sorted(local_cidrs))
 
 
-async def get_nodes(k8s):
-    """Return Node list
+async def get_rsc(k8s, resource, namespace=None, labels=None):
+    """Return resource list
 
     Args:
         k8s: any k8s unit
+        resource: string resource type
+        namespace: string namespace
+        labels: dict of labels
 
     Returns:
-        list of nodes
+        list of resources
     """
-    action = await k8s.run("k8s kubectl get nodes -o json")
+    namespaced = f"-n {namespace}" if namespace else ""
+    labeled = " ".join(f"-l {k}={v}" for k, v in labels.items()) if labels else ""
+    cmd = f"k8s kubectl get {resource} {labeled} {namespaced} -o json"
+
+    action = await k8s.run(cmd)
     result = await action.wait()
-    assert result.results["return-code"] == 0, "Failed to get nodes with kubectl"
-    log.info("Parsing node list...")
-    node_list = json.loads(result.results["stdout"])
-    assert node_list["kind"] == "List", "Should have found a list of nodes"
-    return node_list["items"]
+    assert result.results["return-code"] == 0, f"Failed to get {resource} with kubectl"
+    log.info("Parsing %s list...", resource)
+    resource_list = json.loads(result.results["stdout"])
+    assert resource_list["kind"] == "List", f"Should have found a list of {resource}"
+    return resource_list["items"]
 
 
 @retry(reraise=True, stop=stop_after_attempt(12), wait=wait_fixed(15))
@@ -114,7 +121,7 @@ async def ready_nodes(k8s, expected_count):
         expected_count: number of expected nodes
     """
     log.info("Finding all nodes...")
-    nodes = await get_nodes(k8s)
+ nodes = await get_rsc(k8s, "nodes") ready_nodes = { node["metadata"]["name"]: all( condition["status"] == "False" diff --git a/tests/integration/test_k8s.py b/tests/integration/test_k8s.py index 81f6de98..46b256c7 100644 --- a/tests/integration/test_k8s.py +++ b/tests/integration/test_k8s.py @@ -15,7 +15,7 @@ from tenacity import retry, stop_after_attempt, wait_fixed from .grafana import Grafana -from .helpers import get_leader, get_nodes, ready_nodes +from .helpers import get_leader, get_rsc, ready_nodes from .prometheus import Prometheus log = logging.getLogger(__name__) @@ -45,7 +45,7 @@ async def test_nodes_labelled(request, kubernetes_cluster: model.Model): await kubernetes_cluster.wait_for_idle(status="active", timeout=10 * 60) try: - nodes = await get_nodes(k8s.units[0]) + nodes = await get_rsc(k8s.units[0], "nodes") labelled = [n for n in nodes if testname in n["metadata"]["labels"]] juju_nodes = [n for n in nodes if "juju-charm" in n["metadata"]["labels"]] assert len(k8s.units + worker.units) == len( @@ -60,7 +60,7 @@ async def test_nodes_labelled(request, kubernetes_cluster: model.Model): ) await kubernetes_cluster.wait_for_idle(status="active", timeout=10 * 60) - nodes = await get_nodes(k8s.units[0]) + nodes = await get_rsc(k8s.units[0], "nodes") labelled = [n for n in nodes if testname in n["metadata"]["labels"]] juju_nodes = [n for n in nodes if "juju-charm" in n["metadata"]["labels"]] assert 0 == len(labelled), "Not all nodes labelled with custom-label" diff --git a/tests/integration/test_upgrade.py b/tests/integration/test_upgrade.py index 82c53e0f..0b3b7c52 100644 --- a/tests/integration/test_upgrade.py +++ b/tests/integration/test_upgrade.py @@ -14,8 +14,9 @@ import pytest import yaml from pytest_operator.plugin import OpsTest +from tenacity import before_sleep_log, retry, stop_after_attempt, wait_fixed -from .helpers import Bundle, get_leader +from .helpers import Bundle, get_leader, get_rsc # This pytest mark configures the test environment to 
use the Canonical Kubernetes
# deploying charms from the edge channels, then upgrading them to the built charm.
@@ -43,6 +44,19 @@ async def test_upgrade(kubernetes_cluster: juju.model.Model, ops_test: OpsTest):
     }
     bundle, _ = await Bundle.create(ops_test)
     charms = await bundle.discover_charm_files(ops_test)
+    k8s: juju.application.Application = kubernetes_cluster.applications["k8s"]
+
+    @retry(
+        stop=stop_after_attempt(6),
+        wait=wait_fixed(10),
+        before_sleep=before_sleep_log(log, logging.WARNING),
+    )
+    async def _wait_for_idle():
+        """Wait until every kube-system pod reports phase Running."""
+        kube_system_pods = await get_rsc(k8s.units[0], "pods", namespace="kube-system")
+        assert all(
+            p["status"]["phase"] == "Running" for p in kube_system_pods
+        ), "Kube-system not yet ready"
 
     async def _refresh(app_name: str):
         """Refresh the application.
@@ -58,7 +72,7 @@ async def _refresh(app_name: str):
         leader: juju.unit.Unit = app.units[leader_idx]
         action = await leader.run_action("pre-upgrade-check")
         await action.wait()
-        with_fault = f"Pre-upgrade of {app_name} failed with {yaml.safe_dump(action.results)}"
+        with_fault = f"Pre-upgrade of '{app_name}' failed with {yaml.safe_dump(action.results)}"
         if app_name == "k8s":
             # The k8s charm has a pre-upgrade-check action that works, k8s-worker does not. 
assert action.status == "completed", with_fault @@ -70,5 +84,6 @@ async def _refresh(app_name: str): timeout=30 * 60, ) + await _wait_for_idle() for app in charms: await _refresh(app) From 702d4faef38641e2c8ae5d87307296f5e13cc031 Mon Sep 17 00:00:00 2001 From: Adam Dyess Date: Tue, 17 Dec 2024 12:01:54 -0600 Subject: [PATCH 07/11] Allow test modules to specify which series to deploy --- tests/integration/conftest.py | 16 ++- tests/integration/data/test-bundle-etcd.yaml | 1 + tests/integration/data/test-bundle.yaml | 1 + tests/integration/helpers.py | 104 ++++++++----------- tests/integration/test_upgrade.py | 2 +- 5 files changed, 51 insertions(+), 73 deletions(-) diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index c55b9922..ffcb155d 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -15,12 +15,13 @@ import yaml from juju.model import Model from juju.tag import untag +from juju.url import URL from kubernetes import config as k8s_config from kubernetes.client import Configuration from pytest_operator.plugin import OpsTest from .cos_substrate import LXDSubstrate -from .helpers import Bundle, CharmUrl, cloud_type, get_unit_cidrs, is_deployed +from .helpers import Bundle, cloud_type, get_unit_cidrs, is_deployed log = logging.getLogger(__name__) TEST_DATA = Path(__file__).parent / "data" @@ -72,7 +73,7 @@ def pytest_configure(config): config.addinivalue_line("markers", "cos: mark COS integration tests.") config.addinivalue_line( "markers", - "bundle(file='', apps_local={}, apps_channel={}, apps_resources={}): " + "bundle(file='', series='', apps_local={}, apps_channel={}, apps_resources={}): " "specify a YAML bundle file for a test.", ) @@ -199,14 +200,11 @@ async def grafana_agent(kubernetes_cluster: Model): """Deploy Grafana Agent.""" primary = kubernetes_cluster.applications["k8s"] data = primary.units[0].machine.safe_data - machine_arch = data["hardware-characteristics"]["arch"] - machine_series = 
juju.utils.get_version_series(data["base"].split("@")[1]) + arch = data["hardware-characteristics"]["arch"] + series = juju.utils.get_version_series(data["base"].split("@")[1]) + url = URL("ch", name="grafana-agent", series=series, architecture=arch) - await kubernetes_cluster.deploy( - str(CharmUrl.craft("grafana-agent", machine_series, machine_arch)), - channel="stable", - series=machine_series, - ) + await kubernetes_cluster.deploy(url, channel="stable", series=series) await kubernetes_cluster.integrate("grafana-agent:cos-agent", "k8s:cos-agent") await kubernetes_cluster.integrate("grafana-agent:cos-agent", "k8s-worker:cos-agent") await kubernetes_cluster.integrate("k8s:cos-worker-tokens", "k8s-worker:cos-tokens") diff --git a/tests/integration/data/test-bundle-etcd.yaml b/tests/integration/data/test-bundle-etcd.yaml index 42dbdf62..32164f4a 100644 --- a/tests/integration/data/test-bundle-etcd.yaml +++ b/tests/integration/data/test-bundle-etcd.yaml @@ -29,5 +29,6 @@ applications: num_units: 1 relations: - [k8s, k8s-worker:cluster] + - [k8s, k8s-worker:containerd] - [etcd, easyrsa:client] - [etcd, k8s:etcd] diff --git a/tests/integration/data/test-bundle.yaml b/tests/integration/data/test-bundle.yaml index ca38a19a..9c59097a 100644 --- a/tests/integration/data/test-bundle.yaml +++ b/tests/integration/data/test-bundle.yaml @@ -27,3 +27,4 @@ applications: kubelet-extra-args: "v=3" relations: - [k8s, k8s-worker:cluster] + - [k8s, k8s-worker:containerd] diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index 84aa4361..84d69c2e 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -8,17 +8,17 @@ import ipaddress import json import logging -import re import shlex from dataclasses import dataclass, field -from functools import cache, cached_property +from functools import cached_property from itertools import chain from pathlib import Path -from typing import Dict, List, Mapping, Optional, Set, Tuple +from typing import 
Any, Dict, List, Mapping, Optional, Set, Tuple, Union import yaml from juju import unit from juju.model import Model +from juju.url import URL from pytest_operator.plugin import OpsTest from tenacity import AsyncRetrying, before_sleep_log, retry, stop_after_attempt, wait_fixed @@ -233,74 +233,32 @@ class Markings: """Test markings for the bundle. Attrs: + series: Series for Machines in the bundle apps_local: List of application names needing local files to replace charm urls apps_channel: Mapping of application names to channels apps_resources: Mapping of application names to resources """ + series: Optional[str] = None apps_local: List[str] = field(default_factory=list) apps_channel: Mapping = field(default_factory=dict) apps_resources: Mapping = field(default_factory=dict) -@dataclass -class CharmUrl: - """Represents a charm URL. - - Attrs: - name: Name of the charm in the store - series: Cloud series - arch: Cloud architecture - """ - - name: str - series: str - arch: str - _URL_RE = re.compile(r"ch:(?P\w+)/(?P\w+)/(?P.+)") - - @classmethod - def craft(cls, name: str, series: str, arch: str) -> "CharmUrl": - """Parse a charm URL. - - Args: - name: Name or URL of the charm - series: Cloud series - arch: Cloud architecture - - Returns: - CharmUrl object - """ - if m := cls._URL_RE.match(name): - name = m.group("charm") - return cls(name, series, arch) - - @staticmethod - def representer(dumper: yaml.Dumper, data: "CharmUrl") -> yaml.ScalarNode: - """Yaml representer for the CharmUrl object. - - Args: - dumper: yaml dumper - data: CharmUrl object - - Returns: - yaml.ScalarNode: yaml node - """ - as_str = f"ch:{data.arch}/{data.series}/{data.name}" - return dumper.represent_scalar("tag:yaml.org,2002:str", as_str) - - @dataclass class Charm: """Represents source charms in this repository. 
Attrs: path: Path to the charmcraft file + url: Charm URL metadata: Charm's metadata name: Name of the charm from the metadata local_path: Path to the built charm file """ path: Path + url: URL _charmfile: Optional[Path] = None @cached_property @@ -327,18 +285,18 @@ def local_path(self) -> Path: return self._charmfile @classmethod - @cache - def find(cls, name: str) -> Optional["Charm"]: - """Find a charm by name. + def find(cls, url: Union[URL, str]) -> Optional["Charm"]: + """Find a charm managed in this repo based on its name. Args: - name: Name of the charm + url: Charm url or charm name Returns: Charm object or None """ - if charmcraft := CHARMCRAFT_DIRS.get(name): - return cls(charmcraft) + url = url if isinstance(url, URL) else URL.parse(url) + if charmcraft := CHARMCRAFT_DIRS.get(url.name): + return cls(charmcraft, url) return None async def resolve(self, ops_test: OpsTest, arch: str) -> "Charm": @@ -384,13 +342,15 @@ class Bundle: Attrs: path: Path to the bundle file arch: Cloud Architecture + series: Series for Machines in the bundle content: Loaded content from the path applications: Mapping of applications in the bundle. 
""" path: Path arch: str - _content: Mapping = field(default_factory=dict) + series: Optional[str] = None + _content: Dict[str, Any] = field(default_factory=dict) @classmethod async def create(cls, ops_test) -> Tuple["Bundle", Markings]: @@ -424,17 +384,19 @@ async def create(cls, ops_test) -> Tuple["Bundle", Markings]: return bundle, Markings(**kwargs) @property - def content(self) -> Mapping: + def content(self) -> Dict[str, Any]: """Yaml content of the bundle loaded into a dict Returns: - Mapping: bundle content + Dict: bundle content """ if not self._content: loaded = yaml.safe_load(self.path.read_bytes()) - series = loaded.get("series", "focal") + self.series = loaded.get("series") for app in loaded["applications"].values(): - app["charm"] = CharmUrl.craft(app["charm"], series=series, arch=self.arch) + url = URL.parse(app["charm"]) + url.architecture = self.arch + app["charm"] = url self._content = loaded return self._content @@ -458,7 +420,7 @@ async def discover_charm_files(self, ops_test: OpsTest) -> Dict[str, Charm]: """ app_to_charm = {} for app in self.applications.values(): - if charm := Charm.find(app["charm"].name): + if charm := Charm.find(app["charm"]): await charm.resolve(ops_test, self.arch) app_to_charm[charm.name] = charm return app_to_charm @@ -483,6 +445,9 @@ async def apply_marking(self, ops_test: OpsTest, markings: Markings): empty_resource = { "snap-installation": ops_test.request.config.option.snap_installation_resource } + if markings.series: + self.content["series"] = self.series = markings.series + for app in markings.apps_local: assert app in charms, f"App={app} doesn't have a local charm" rsc = markings.apps_resources.get(app) or empty_resource @@ -516,7 +481,7 @@ def switch( if not charm.local_path and not channel: raise FileNotFoundError(f"Charm={charm.name} for App={app} not found") if channel: - app["charm"] = charm.name + app["charm"] = charm.url.with_series(self.series) app["channel"] = channel else: app["charm"] = 
str(charm.local_path.resolve()) @@ -600,4 +565,17 @@ async def cloud_type(ops_test: OpsTest) -> Tuple[str, bool]: return _type, vms -yaml.add_representer(CharmUrl, CharmUrl.representer) +def url_representer(dumper: yaml.Dumper, data: URL) -> yaml.ScalarNode: + """Yaml representer for the Charm URL object. + + Args: + dumper: yaml dumper + data: URL object + + Returns: + yaml.ScalarNode: yaml node + """ + return dumper.represent_scalar("tag:yaml.org,2002:str", str(data)) + + +yaml.add_representer(URL, url_representer) diff --git a/tests/integration/test_upgrade.py b/tests/integration/test_upgrade.py index 0b3b7c52..f635ea53 100644 --- a/tests/integration/test_upgrade.py +++ b/tests/integration/test_upgrade.py @@ -22,7 +22,7 @@ # deploying charms from the edge channels, then upgrading them to the built charm. pytestmark = [ pytest.mark.bundle( - file="test-bundle.yaml", apps_channel={"k8s": "edge", "k8s-worker": "edge"} + file="test-bundle.yaml", apps_channel={"k8s": "edge", "k8s-worker": "edge"}, series="jammy" ), ] From 1f877ed6dedd074ca6c0576ccd8736746cb0366a Mon Sep 17 00:00:00 2001 From: Adam Dyess Date: Tue, 17 Dec 2024 12:24:55 -0600 Subject: [PATCH 08/11] Once kube-system is healthy, stop rescheduling update-status --- charms/worker/k8s/src/events/update_status.py | 36 +++++++++++++------ 1 file changed, 25 insertions(+), 11 deletions(-) diff --git a/charms/worker/k8s/src/events/update_status.py b/charms/worker/k8s/src/events/update_status.py index 7472a483..4e051519 100644 --- a/charms/worker/k8s/src/events/update_status.py +++ b/charms/worker/k8s/src/events/update_status.py @@ -9,6 +9,7 @@ """ import logging +from typing import Optional import charms.contextual_status as status import ops @@ -91,6 +92,27 @@ def _on_update_status(self, event: ops.UpdateStatusEvent): except status.ReconcilerError: log.exception("Can't update_status") + def kube_system_pods_waiting(self) -> Optional[ops.WaitingStatus]: + """Check if kube-system pods are waiting. 
+ + Returns: + WaitingStatus: waiting status if kube-system pods are not ready. + """ + if self.charm.is_worker: + # Worker nodes don't need to check the kube-system pods + return None + + waiting, inspect = None, self.charm.cluster_inspector + + try: + if failing_pods := inspect.verify_pods_running(["kube-system"]): + waiting = ops.WaitingStatus(f"Unready Pods: {failing_pods}") + except ClusterInspector.ClusterInspectorError as e: + log.exception("Failed to verify pods: %s", e) + waiting = ops.WaitingStatus("Waiting for API Server") + + return waiting + def run(self): """Check k8s snap status.""" version, overridden = snap_version("k8s") @@ -109,16 +131,8 @@ def run(self): trigger.create(reschedule.Period(seconds=30)) return - if self.charm.is_control_plane: - inspect = self.charm.cluster_inspector - try: - if failing_pods := inspect.verify_pods_running(["kube-system"]): - status.add(ops.WaitingStatus(f"Unready kube-system Pods: {failing_pods}")) - except ClusterInspector.ClusterInspectorError as e: - log.exception("Failed to verify pods: %s", e) - status.add(ops.WaitingStatus("Waiting for API Server")) - finally: - trigger.create(reschedule.Period(seconds=30)) + if waiting := self.kube_system_pods_waiting(): + status.add(waiting) + trigger.create(reschedule.Period(seconds=30)) return - trigger.cancel() From 7006305b4d3373add92a82fb67594a86294acb94 Mon Sep 17 00:00:00 2001 From: Adam Dyess Date: Tue, 17 Dec 2024 14:08:00 -0600 Subject: [PATCH 09/11] Test arm64 on focal, amd64 on jammy --- .github/workflows/integration_test.yaml | 2 +- tests/integration/conftest.py | 1 + tests/integration/data/test-bundle-etcd.yaml | 2 +- tests/integration/data/test-bundle.yaml | 2 +- tests/integration/helpers.py | 5 +++-- tests/integration/test_upgrade.py | 2 +- 6 files changed, 8 insertions(+), 6 deletions(-) diff --git a/.github/workflows/integration_test.yaml b/.github/workflows/integration_test.yaml index d7b7f1ca..575ec39a 100644 --- a/.github/workflows/integration_test.yaml 
+++ b/.github/workflows/integration_test.yaml @@ -51,7 +51,7 @@ jobs: charmcraft-channel: ${{ needs.charmcraft-channel.outputs.channel }} extra-arguments: >- ${{needs.extra-args.outputs.args}} -k test_${{ matrix.suite }} - ${{ matrix.arch.id == 'arm64' && ' --lxd-containers' || '' }} + ${{ matrix.arch.id == 'arm64' && ' --lxd-containers --series=focal' || '' }} juju-channel: 3/stable load-test-enabled: false provider: lxd diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py index ffcb155d..337ca5bb 100644 --- a/tests/integration/conftest.py +++ b/tests/integration/conftest.py @@ -45,6 +45,7 @@ def pytest_addoption(parser: pytest.Parser): Args: parser: Pytest parser. """ + parser.addoption("--series", default=None, help="Series to deploy, overrides any markings") parser.addoption("--charm-file", dest="charm_files", action="append", default=[]) parser.addoption( "--snap-installation-resource", default=str(DEFAULT_SNAP_INSTALLATION.resolve()) diff --git a/tests/integration/data/test-bundle-etcd.yaml b/tests/integration/data/test-bundle-etcd.yaml index 32164f4a..30c947b9 100644 --- a/tests/integration/data/test-bundle-etcd.yaml +++ b/tests/integration/data/test-bundle-etcd.yaml @@ -4,7 +4,7 @@ name: integration-test-etcd description: |- Used to deploy or refresh within an integration test model -series: focal +series: jammy applications: easyrsa: charm: easyrsa diff --git a/tests/integration/data/test-bundle.yaml b/tests/integration/data/test-bundle.yaml index 9c59097a..e35e10db 100644 --- a/tests/integration/data/test-bundle.yaml +++ b/tests/integration/data/test-bundle.yaml @@ -4,7 +4,7 @@ name: integration-test description: |- Used to deploy or refresh within an integration test model -series: focal +series: jammy applications: k8s: charm: k8s diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index 84d69c2e..a5482a52 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -445,8 +445,9 @@ async def 
apply_marking(self, ops_test: OpsTest, markings: Markings): empty_resource = { "snap-installation": ops_test.request.config.option.snap_installation_resource } - if markings.series: - self.content["series"] = self.series = markings.series + + if series := ops_test.request.config.option.series or markings.series: + self.content["series"] = self.series = series for app in markings.apps_local: assert app in charms, f"App={app} doesn't have a local charm" diff --git a/tests/integration/test_upgrade.py b/tests/integration/test_upgrade.py index f635ea53..0b3b7c52 100644 --- a/tests/integration/test_upgrade.py +++ b/tests/integration/test_upgrade.py @@ -22,7 +22,7 @@ # deploying charms from the edge channels, then upgrading them to the built charm. pytestmark = [ pytest.mark.bundle( - file="test-bundle.yaml", apps_channel={"k8s": "edge", "k8s-worker": "edge"}, series="jammy" + file="test-bundle.yaml", apps_channel={"k8s": "edge", "k8s-worker": "edge"} ), ] From 8ccaf1f67b1dea2d413b47b3b1ad322c3c8b12cf Mon Sep 17 00:00:00 2001 From: Adam Dyess Date: Tue, 17 Dec 2024 16:19:35 -0600 Subject: [PATCH 10/11] Integrate review comment --- tests/integration/helpers.py | 8 ++++---- tests/integration/test_upgrade.py | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index a5482a52..94ad2610 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -88,16 +88,16 @@ async def get_unit_cidrs(model: Model, app_name: str, unit_num: int) -> List[str async def get_rsc(k8s, resource, namespace=None, labels=None): - """Return Pod list + """Get Resource list optionally filtered by namespace and labels. Args: k8s: any k8s unit - resource: string resource type + resource: string resource type (e.g. 
pods, services, nodes) namespace: string namespace - labels: dict of labels + labels: dict of labels to use for filtering Returns: - list of pods + list of resources """ namespaced = f"-n {namespace}" if namespace else "" labeled = " ".join(f"-l {k}={v}" for k, v in labels.items()) if labels else "" diff --git a/tests/integration/test_upgrade.py b/tests/integration/test_upgrade.py index 0b3b7c52..883243fb 100644 --- a/tests/integration/test_upgrade.py +++ b/tests/integration/test_upgrade.py @@ -47,8 +47,8 @@ async def test_upgrade(kubernetes_cluster: juju.model.Model, ops_test: OpsTest): k8s: juju.application.Application = kubernetes_cluster.applications["k8s"] @retry( - stop=stop_after_attempt(6), - wait=wait_fixed(10), + stop=stop_after_attempt(10), + wait=wait_fixed(30), before_sleep=before_sleep_log(log, logging.WARNING), ) async def _wait_for_idle(): From b1a3178dcae3ce36de21ee3631a153b4cccce979 Mon Sep 17 00:00:00 2001 From: Adam Dyess Date: Tue, 17 Dec 2024 22:50:48 -0600 Subject: [PATCH 11/11] increase amd64 test runner --- .github/workflows/integration_test.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/integration_test.yaml b/.github/workflows/integration_test.yaml index 575ec39a..8de5c3a7 100644 --- a/.github/workflows/integration_test.yaml +++ b/.github/workflows/integration_test.yaml @@ -39,8 +39,8 @@ jobs: strategy: matrix: arch: - {id: amd64, builder-label: ubuntu-22.04, tester-arch: AMD64} # built on azure - {id: arm64, builder-label: ARM64, tester-arch: ARM64} # built on self-hosted + - {id: amd64, builder-label: ubuntu-22.04, tester-arch: AMD64, tester-size: xlarge} # built on azure, test on self-hosted + - {id: arm64, builder-label: ARM64, tester-arch: ARM64, tester-size: large } # built and test on self-hosted suite: [k8s, etcd, ceph, upgrade] exclude: - {arch: {id: arm64}, suite: ceph} @@ -57,7 +57,7 @@ jobs: provider: lxd self-hosted-runner: true self-hosted-runner-arch: ${{
matrix.arch.tester-arch }} - self-hosted-runner-label: large + self-hosted-runner-label: ${{ matrix.arch.tester-size }} test-timeout: 120 test-tox-env: integration-${{ matrix.suite }} trivy-fs-enabled: false