From 82c007b0054af73b214020568ca345a72cf0e198 Mon Sep 17 00:00:00 2001 From: Daniel Osypenko Date: Sun, 16 Jun 2024 14:02:10 +0300 Subject: [PATCH 01/29] create_hypershift_clusters fixture added Signed-off-by: Daniel Osypenko --- .../hypershift_client_bm_2w.yaml | 10 ++ .../hypershift_client_bm_3w.yaml | 10 ++ ocs_ci/deployment/helpers/hypershift_base.py | 27 +++- ocs_ci/deployment/hosted_cluster.py | 138 +++++++++++------- ocs_ci/framework/__init__.py | 38 +++++ tests/cross_functional/conftest.py | 84 ++++++++++- .../multicluster/test_orphan_clients.py | 83 +++++++++++ 7 files changed, 330 insertions(+), 60 deletions(-) create mode 100644 conf/deployment/fusion_hci_pc/hypershift_client_bm_2w.yaml create mode 100644 conf/deployment/fusion_hci_pc/hypershift_client_bm_3w.yaml create mode 100644 tests/cross_functional/system_test/multicluster/test_orphan_clients.py diff --git a/conf/deployment/fusion_hci_pc/hypershift_client_bm_2w.yaml b/conf/deployment/fusion_hci_pc/hypershift_client_bm_2w.yaml new file mode 100644 index 00000000000..73e942d82b8 --- /dev/null +++ b/conf/deployment/fusion_hci_pc/hypershift_client_bm_2w.yaml @@ -0,0 +1,10 @@ +ENV_DATA: + platform: 'hci_baremetal' + cluster_type: 'hci_client' + cluster_namespace: "openshift-storage-client" + worker_replicas: 2 + mon_type: 'hostpath' + osd_type: 'ssd' +REPORTING: + ocs_must_gather_image: "quay.io/ocs-dev/ocs-must-gather" + ocs_must_gather_latest_tag: 'latest' diff --git a/conf/deployment/fusion_hci_pc/hypershift_client_bm_3w.yaml b/conf/deployment/fusion_hci_pc/hypershift_client_bm_3w.yaml new file mode 100644 index 00000000000..4b41d399e51 --- /dev/null +++ b/conf/deployment/fusion_hci_pc/hypershift_client_bm_3w.yaml @@ -0,0 +1,10 @@ +ENV_DATA: + platform: 'hci_baremetal' + cluster_type: 'hci_client' + cluster_namespace: "openshift-storage-client" + worker_replicas: 3 + mon_type: 'hostpath' + osd_type: 'ssd' +REPORTING: + ocs_must_gather_image: "quay.io/ocs-dev/ocs-must-gather" + ocs_must_gather_latest_tag: 'latest' diff --git a/ocs_ci/deployment/helpers/hypershift_base.py b/ocs_ci/deployment/helpers/hypershift_base.py index 34e7ae994b4..e217913f2cf 100644 --- a/ocs_ci/deployment/helpers/hypershift_base.py +++ b/ocs_ci/deployment/helpers/hypershift_base.py @@ -1,6 +1,9 @@ import logging import os +import random +import re import shutil +import string import tempfile import time from datetime import datetime @@ -14,7 +17,7 @@ from ocs_ci.ocs.resources.pod import wait_for_pods_to_be_in_statuses_concurrently from ocs_ci.ocs.version import get_ocp_version from ocs_ci.utility.retry import retry -from ocs_ci.utility.utils import exec_cmd, TimeoutSampler +from ocs_ci.utility.utils import exec_cmd, TimeoutSampler, get_latest_release_version """ This module contains the base class for HyperShift hosted cluster management. 
@@ -62,6 +65,28 @@ def wrapper(self, *args, **kwargs):
     return wrapper
 
 
+def get_random_cluster_name():
+    """
+    Get a random cluster name
+
+    Returns:
+        str: random cluster name
+    """
+    # getting the cluster name from the env data, for instance "ibm_cloud_baremetal3; mandatory conf field"
+    bm_name = config.ENV_DATA.get("baremetal").get("env_name")
+    ocp_version = get_latest_release_version()
+    hcp_version = "".join([c for c in ocp_version if c.isdigit()][:3])
+    match = re.search(r"\d+$", bm_name)
+    if match:
+        random_letters = "".join(
+            random.choice(string.ascii_lowercase) for _ in range(3)
+        )
+        cluster_name = hcp_version + "-" + bm_name[match.start() :] + random_letters
+    else:
+        raise ValueError("Cluster name not found in the env data")
+    return cluster_name
+
+
 def get_binary_hcp_version():
     """
     Get hcp version output. Handles hcp 4.16 and 4.17 cmd differences
diff --git a/ocs_ci/deployment/hosted_cluster.py b/ocs_ci/deployment/hosted_cluster.py
index 730c1f21527..cfeae8f1328 100644
--- a/ocs_ci/deployment/hosted_cluster.py
+++ b/ocs_ci/deployment/hosted_cluster.py
@@ -56,7 +56,7 @@ def __init__(self):
                 "No 'clusters': '{: }' set to ENV_DATA"
             )
 
-    def do_deploy(self):
+    def do_deploy(self, cluster_names=None):
         """
         Deploy multiple hosted OCP clusters on Provider platform and setup ODF client on them
         Perform the 7 stages of deployment:
@@ -73,11 +73,26 @@ def do_deploy(self):
         solution: disable MCE and install upstream Hypershift on the cluster
 
         ! Important !
-        due to n-1 logic we are assuming that desired CNV version <= OCP version
+        due to n-1 logic we are assuming that desired CNV version <= OCP version of managing/Provider cluster
+
+        Args:
+            cluster_names (list): cluster names to deploy, if None, all clusters from ENV_DATA will be deployed
+
+        Returns:
+            list: the list of HostedODF objects for all hosted OCP clusters deployed by the method successfully
         """
 
         # stage 1 deploy multiple hosted OCP clusters
-        cluster_names = self.deploy_hosted_ocp_clusters()
+        # If all desired clusters were already deployed, self.deploy_hosted_ocp_clusters() returns None instead of
+        # a list; in that case we assume the Hosted OCP cluster creation stage is done and proceed to ODF
+        # installation and storage client setup.
+        # If specific cluster names were provided, we will deploy only those.
+ if not cluster_names: + cluster_names = self.deploy_hosted_ocp_clusters() or list( + config.ENV_DATA.get("clusters").keys() + ) + if cluster_names: + cluster_names = self.deploy_hosted_ocp_clusters(cluster_names) # stage 2 verify OCP clusters are ready logger.info( @@ -91,11 +106,6 @@ def do_deploy(self): logger.info("Download kubeconfig for all clusters") kubeconfig_paths = self.download_hosted_clusters_kubeconfig_files() - # if all desired clusters were already deployed and step 1 returns None instead of the list, - # we proceed to ODF installation and storage client setup - if not cluster_names: - cluster_names = list(config.ENV_DATA.get("clusters").keys()) - # stage 4 deploy ODF on all hosted clusters if not already deployed for cluster_name in cluster_names: @@ -112,51 +122,39 @@ def do_deploy(self): # stage 5 verify ODF client is installed on all hosted clusters odf_installed = [] for cluster_name in cluster_names: - - if not self.config_has_hosted_odf_image(cluster_name): + if self.config_has_hosted_odf_image(cluster_name): logger.info( - f"Hosted ODF image not set for cluster '{cluster_name}', skipping ODF validation" + f"Validate ODF client operator installed on hosted OCP cluster '{cluster_name}'" ) - continue - - logger.info( - f"Validate ODF client operator installed on hosted OCP cluster '{cluster_name}'" - ) - hosted_odf = HostedODF(cluster_name) - - if not hosted_odf.odf_client_installed(): - # delete catalogsources help to finish install cluster if nodes have not enough mem - # see oc describe pod ocs-client-operator-controller-manager- -n openshift-storage-client - # when the problem was hit - hosted_odf.exec_oc_cmd( - "delete catalogsource --all -n openshift-marketplace" - ) - logger.info("wait 30 sec and create catalogsource again") - time.sleep(30) - hosted_odf.create_catalog_source() - odf_installed.append(hosted_odf.odf_client_installed()) + hosted_odf = HostedODF(cluster_name) + if not hosted_odf.odf_client_installed(): + hosted_odf.exec_oc_cmd( + "delete catalogsource --all -n openshift-marketplace" + ) + logger.info("wait 30 sec and create catalogsource again") + time.sleep(30) + hosted_odf.create_catalog_source() + odf_installed.append(hosted_odf.odf_client_installed()) # stage 6 setup storage client on all hosted clusters - client_setup = [] + client_setup_res = [] + hosted_odf_clusters_installed = [] for cluster_name in cluster_names: - - if ( - not config.ENV_DATA.get("clusters") - .get(cluster_name) - .get("setup_storage_client", False) - ): + if self.storage_installation_requested(cluster_name): logger.info( - f"Storage client setup not set for cluster '{cluster_name}', skipping storage client setup" + f"Setting up Storage client on hosted OCP cluster '{cluster_name}'" ) - continue - - logger.info( - f"Setting up Storage client on hosted OCP cluster '{cluster_name}'" - ) - hosted_odf = HostedODF(cluster_name) - client_setup.append(hosted_odf.setup_storage_client()) - - # stage 7 verify all hosted clusters are ready and print kubeconfig paths + hosted_odf = HostedODF(cluster_name) + client_installed = hosted_odf.setup_storage_client() + client_setup_res.append(client_installed) + if client_installed: + hosted_odf_clusters_installed.append(hosted_odf) + else: + logger.info( + f"Storage client installation not requested for cluster '{cluster_name}', " + "skipping storage client setup" + ) + # stage 7 verify all hosted clusters are ready and print kubeconfig paths on Agent logger.info( "kubeconfig files for all hosted OCP clusters:\n" + "\n".join( @@ -172,9 
+170,11 @@ def do_deploy(self): odf_installed ), "ODF client was not deployed on all hosted OCP clusters" assert all( - client_setup + client_setup_res ), "Storage client was not set up on all hosted ODF clusters" + return hosted_odf_clusters_installed + def config_has_hosted_odf_image(self, cluster_name): """ Check if the config has hosted ODF image set for the cluster @@ -199,23 +199,53 @@ def config_has_hosted_odf_image(self, cluster_name): return regestry_exists and version_exists - def deploy_hosted_ocp_clusters( - self, - ): + def storage_installation_requested(self, cluster_name): + """ + Check if the storage client installation was requested in the config + + Args: + cluster_name: str: Name of the cluster + """ + return ( + config.ENV_DATA.get("clusters", {}) + .get(cluster_name, {}) + .get("setup_storage_client", False) + ) + + def deploy_hosted_ocp_clusters(self, cluster_names_list=None): """ Deploy multiple hosted OCP clusters on Provider platform + Args: + cluster_names_list (list, optional): List of cluster names to deploy. If not provided, all clusters + in config.ENV_DATA["clusters"] will be deployed. + Returns: - list: the list of cluster names for all hosted OCP clusters deployed by the func successfully + list: The list of cluster names for all hosted OCP clusters deployed by the func successfully """ - cluster_names_desired = list(config.ENV_DATA["clusters"].keys()) + # Get the list of cluster names to deploy + if cluster_names_list: + cluster_names_desired = [ + name + for name in cluster_names_list + if name in config.ENV_DATA["clusters"].keys() + ] + else: + cluster_names_desired = list(config.ENV_DATA["clusters"].keys()) number_of_clusters_to_deploy = len(cluster_names_desired) - logger.info(f"Deploying '{number_of_clusters_to_deploy}' number of clusters") + deployment_mode = ( + "only specified clusters" + if cluster_names_list + else "clusters from deployment configuration" + ) + logger.info( + f"Deploying '{number_of_clusters_to_deploy}' number of {deployment_mode}" + ) cluster_names = [] - for index, cluster_name in enumerate(config.ENV_DATA["clusters"].keys()): + for index, cluster_name in enumerate(cluster_names_desired): logger.info(f"Creating hosted OCP cluster: {cluster_name}") hosted_ocp_cluster = HypershiftHostedOCP(cluster_name) # we need to ensure that all dependencies are installed so for the first cluster we will install all, diff --git a/ocs_ci/framework/__init__.py b/ocs_ci/framework/__init__.py index 4e61f85c14f..8a622ddec07 100644 --- a/ocs_ci/framework/__init__.py +++ b/ocs_ci/framework/__init__.py @@ -6,6 +6,7 @@ https://docs.pytest.org/en/latest/reference.html under section PYTEST_DONT_REWRITE """ + # Use the new python 3.7 dataclass decorator, which provides an object similar # to a namedtuple, but allows type enforcement and defining methods. 
import os @@ -478,6 +479,43 @@ def __init__(self): primary_index = primary_config.MULTICLUSTER.get("multicluster_index") super().__init__(primary_index) + def insert_cluster_config(self, index, new_config): + """ + Insert a new cluster configuration at the given index + + Args: + index (int): The index at which to insert the new configuration + new_config (Config): The new configuration to insert + + """ + self.clusters.insert(index, new_config) + self.nclusters += 1 + self._refresh_ctx() + + def remove_cluster(self, index): + """ + Remove the cluster at the given index + + Args: + index (int): The index of the cluster to remove + """ + self.clusters.pop(index) + self.nclusters -= 1 + self._refresh_ctx() + + def remove_cluster_by_name(self, cluster_name): + """ + Remove the cluster by the cluster name + + Args: + cluster_name (str): The cluster name to remove + + Raises: + ClusterNotFoundException: In case it didn't find the cluster + + """ + self.remove_cluster(self.get_cluster_index_by_name(cluster_name)) + config = MultiClusterConfig() diff --git a/tests/cross_functional/conftest.py b/tests/cross_functional/conftest.py index 7272bb95534..19e542b187c 100644 --- a/tests/cross_functional/conftest.py +++ b/tests/cross_functional/conftest.py @@ -1,4 +1,5 @@ import os +import json import logging import boto3 import pytest @@ -6,9 +7,12 @@ from concurrent.futures import ThreadPoolExecutor from threading import Event +import yaml + +from ocs_ci.deployment.hosted_cluster import HostedClients from ocs_ci.utility import version from ocs_ci.utility.retry import retry -from ocs_ci.framework import config +from ocs_ci.framework import config, Config from ocs_ci.helpers.e2e_helpers import ( create_muliple_types_provider_obcs, validate_mcg_bucket_replicaton, @@ -989,7 +993,6 @@ def setup_rgw_kafka_notification(request, rgw_bucket_factory, rgw_obj): ) = amq.create_kafkadrop() def factory(): - """ Factory function implementing the fixture @@ -1106,9 +1109,9 @@ def factory( event = Event() executor = ThreadPoolExecutor( - max_workers=5 - len(skip_any_features) - if skip_any_features is not None - else 5 + max_workers=( + 5 - len(skip_any_features) if skip_any_features is not None else 5 + ) ) skip_any_features = list() if skip_any_features is None else skip_any_features @@ -1352,3 +1355,74 @@ def factory( return feature_setup_map return factory + + +@pytest.fixture() +def create_hypershift_clusters(): + """ + Create hosted hyperhift clusters. + + Here we reach cluster deployment configuration that was set in the Test. With this configuration we + create a hosted cluster. After successful creation of the hosted cluster, we update the Multicluster Config, + adding the new cluster configuration to the list of the clusters. 
Now we can operate with new and old clusters + switching the context of Multicluster Config + + Expects following dictionary config to be set in the Test (might be updated, check the documetation): + ENV_DATA: + clusters: + : + hosted_cluster_path: + ocp_version: + cpu_cores_per_hosted_cluster: + memory_per_hosted_cluster: + hosted_odf_registry: + hosted_odf_version: + setup_storage_client: + nodepool_replicas: + + """ + + def factory(hosted_cluster_conf_on_provider): + + data = json.loads(hosted_cluster_conf_on_provider) + worker_nodes_number = data.get("ENV_DATA").get("nodepool_replicas") + logger.info( + "Creating a hosted clusters with following deployment config: %s", + json.dumps(data, indent=4), + ) + + # During the initial deployment phase, we always deploy Hosting and specific Hosted clusters. + # To distinguish between clusters intended for deployment on deployment CI stage and those intended for + # deployment on the Test stage, we pass the names of the clusters to be deployed to the + # HostedClients().do_deploy() method. + cluster_names = list( + hosted_cluster_conf_on_provider.get("ENV_DATA").get("clusters").keys() + ) + HostedClients().do_deploy(cluster_names) + + config.update(hosted_cluster_conf_on_provider) + + for cluster_name in cluster_names: + cluster_config = Config() + with open( + os.path.join( + os.path.dirname(os.path.abspath(__file__)), + f"conf/deployment/fusion_hci_pc/hypershift_client_bm_{worker_nodes_number}w.yaml", + ) + ) as file_stream: + def_client_config_dict = { + k: (v if v is not None else {}) + for (k, v) in yaml.safe_load(file_stream).items() + } + def_client_config_dict.get("ENV_DATA").update( + {"cluster_name": cluster_name} + ) + + cluster_config.update(def_client_config_dict) + logger.info( + "Inserting new hosted cluster config to Multicluster Config " + f"\n{json.dumps(cluster_config, indent=4)}" + ) + config.insert_cluster_config(config.nclusters, cluster_config) + + return factory diff --git a/tests/cross_functional/system_test/multicluster/test_orphan_clients.py b/tests/cross_functional/system_test/multicluster/test_orphan_clients.py new file mode 100644 index 00000000000..f158e59d8fc --- /dev/null +++ b/tests/cross_functional/system_test/multicluster/test_orphan_clients.py @@ -0,0 +1,83 @@ +import logging + +import pytest + +from ocs_ci.deployment.helpers.hypershift_base import ( + get_random_cluster_name, +) +from ocs_ci.framework import config as ocsci_config +from ocs_ci.framework.pytest_customization.marks import ( + tier4b, +) +from ocs_ci.ocs.version import get_ocs_version +from ocs_ci.utility.utils import get_latest_release_version + + +logger = logging.getLogger(__name__) + + +@pytest.fixture +def return_to_original_context(request): + """ + Make sure that original context is restored after the test. + """ + original_cluster = ocsci_config.cluster_ctx.MULTICLUSTER["multicluster_index"] + + def finalizer(): + logger.info(f"Switching back to original cluster with index {original_cluster}") + ocsci_config.switch_ctx(original_cluster) + + request.addfinalizer(finalizer) + yield + + +class TestStorageClientRemoval(object): + """ + Test storage client removal + """ + + @tier4b + def test_remove_orphan_clients_resources( + self, create_hypershift_clusters, return_to_original_context + ): + """ + This test is to remove the orphaned storage client resources + + Steps: + 1. Create hosted client. + 2. Add block and cephfs resources and data on hosted client. + 3. Remove the storage client with `hcp` command. + 4. 
Verify the storage client and it's resources were removed from Provider. + """ + cluster_name = get_random_cluster_name() + odf_version = get_ocs_version() + ocp_version = get_latest_release_version() + hosted_clusters_conf_on_provider = { + "ENV_DATA": { + "clusters": { + cluster_name: { + "hosted_cluster_path": f"~/clusters/{cluster_name}/openshift-cluster-dir", + "ocp_version": ocp_version, + "cpu_cores_per_hosted_cluster": 8, + "memory_per_hosted_cluster": "12Gi", + "hosted_odf_registry": "quay.io/rhceph-dev/ocs-registry", + "hosted_odf_version": odf_version, + "setup_storage_client": True, + "nodepool_replicas": 2, + } + } + } + } + + create_hypershift_clusters(hosted_clusters_conf_on_provider) + + original_cluster_index = ocsci_config.cluster_ctx.MULTICLUSTER[ + "multicluster_index" + ] + logger.info(f"Original cluster index: {original_cluster_index}") + + ocsci_config.switch_to_cluster_by_name(cluster_name) + + logger.info( + f"Switched to cluster with index {ocsci_config.cluster_ctx.MULTICLUSTER['multicluster_index']}" + ) From 9882e279c655be2335b64ae9dcdf7630de193eb3 Mon Sep 17 00:00:00 2001 From: Daniel Osypenko Date: Sun, 16 Jun 2024 15:23:06 +0300 Subject: [PATCH 02/29] 0.1 Signed-off-by: Daniel Osypenko --- tests/cross_functional/conftest.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/cross_functional/conftest.py b/tests/cross_functional/conftest.py index 19e542b187c..8de6181aef4 100644 --- a/tests/cross_functional/conftest.py +++ b/tests/cross_functional/conftest.py @@ -9,7 +9,7 @@ import yaml -from ocs_ci.deployment.hosted_cluster import HostedClients +# from ocs_ci.deployment.hosted_cluster import HostedClients from ocs_ci.utility import version from ocs_ci.utility.retry import retry from ocs_ci.framework import config, Config @@ -1384,11 +1384,12 @@ def create_hypershift_clusters(): def factory(hosted_cluster_conf_on_provider): - data = json.loads(hosted_cluster_conf_on_provider) - worker_nodes_number = data.get("ENV_DATA").get("nodepool_replicas") + worker_nodes_number = hosted_cluster_conf_on_provider.get("ENV_DATA").get( + "nodepool_replicas" + ) logger.info( "Creating a hosted clusters with following deployment config: %s", - json.dumps(data, indent=4), + json.dumps(hosted_cluster_conf_on_provider, indent=4), ) # During the initial deployment phase, we always deploy Hosting and specific Hosted clusters. 
@@ -1398,7 +1399,7 @@ def factory(hosted_cluster_conf_on_provider): cluster_names = list( hosted_cluster_conf_on_provider.get("ENV_DATA").get("clusters").keys() ) - HostedClients().do_deploy(cluster_names) + # HostedClients().do_deploy(cluster_names) config.update(hosted_cluster_conf_on_provider) From 0b4f7a8858787a62512feeab903eacdbf93610bf Mon Sep 17 00:00:00 2001 From: Daniel Osypenko Date: Sun, 16 Jun 2024 15:40:22 +0300 Subject: [PATCH 03/29] 0.2 Signed-off-by: Daniel Osypenko --- ocs_ci/utility/utils.py | 7 +++++++ tests/cross_functional/conftest.py | 8 +++++--- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/ocs_ci/utility/utils.py b/ocs_ci/utility/utils.py index 86fdd1790f8..dfb63719e47 100644 --- a/ocs_ci/utility/utils.py +++ b/ocs_ci/utility/utils.py @@ -5041,3 +5041,10 @@ def sum_of_two_storage_sizes(storage_size1, storage_size2, convert_size=1024): size = size1 + size2 new_storage_size = f"{size}{unit}" return new_storage_size + + +class CustomJSONEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, set): + return list(obj) + return super().default(obj) diff --git a/tests/cross_functional/conftest.py b/tests/cross_functional/conftest.py index 8de6181aef4..9cb8ed06444 100644 --- a/tests/cross_functional/conftest.py +++ b/tests/cross_functional/conftest.py @@ -56,7 +56,7 @@ from ocs_ci.utility.kms import is_kms_enabled -from ocs_ci.utility.utils import clone_notify +from ocs_ci.utility.utils import clone_notify, CustomJSONEncoder logger = logging.getLogger(__name__) @@ -1389,7 +1389,9 @@ def factory(hosted_cluster_conf_on_provider): ) logger.info( "Creating a hosted clusters with following deployment config: %s", - json.dumps(hosted_cluster_conf_on_provider, indent=4), + json.dumps( + hosted_cluster_conf_on_provider, indent=4, cls=CustomJSONEncoder + ), ) # During the initial deployment phase, we always deploy Hosting and specific Hosted clusters. 
@@ -1422,7 +1424,7 @@ def factory(hosted_cluster_conf_on_provider): cluster_config.update(def_client_config_dict) logger.info( "Inserting new hosted cluster config to Multicluster Config " - f"\n{json.dumps(cluster_config, indent=4)}" + f"\n{json.dumps(cluster_config, indent=4, cls=CustomJSONEncoder)}" ) config.insert_cluster_config(config.nclusters, cluster_config) From b6838667a836e1a4f9d108ad2dd9d22c45fe6149 Mon Sep 17 00:00:00 2001 From: Daniel Osypenko Date: Sun, 16 Jun 2024 16:50:52 +0300 Subject: [PATCH 04/29] 0.3 Signed-off-by: Daniel Osypenko --- tests/cross_functional/conftest.py | 9 +++++++-- .../system_test/multicluster/test_orphan_clients.py | 5 ++--- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/tests/cross_functional/conftest.py b/tests/cross_functional/conftest.py index 9cb8ed06444..a0b5d1ce974 100644 --- a/tests/cross_functional/conftest.py +++ b/tests/cross_functional/conftest.py @@ -1384,9 +1384,14 @@ def create_hypershift_clusters(): def factory(hosted_cluster_conf_on_provider): - worker_nodes_number = hosted_cluster_conf_on_provider.get("ENV_DATA").get( - "nodepool_replicas" + env_data = hosted_cluster_conf_on_provider.get("ENV_DATA", {}) + clusters = env_data.get("clusters", {}) + first_cluster_name = next(iter(clusters), None) + worker_nodes_number = clusters.get(first_cluster_name, {}).get( + "nodepool_replicas", None ) + + assert worker_nodes_number, "Worker nodes number is not set" logger.info( "Creating a hosted clusters with following deployment config: %s", json.dumps( diff --git a/tests/cross_functional/system_test/multicluster/test_orphan_clients.py b/tests/cross_functional/system_test/multicluster/test_orphan_clients.py index f158e59d8fc..4a8b97975e2 100644 --- a/tests/cross_functional/system_test/multicluster/test_orphan_clients.py +++ b/tests/cross_functional/system_test/multicluster/test_orphan_clients.py @@ -9,9 +9,8 @@ from ocs_ci.framework.pytest_customization.marks import ( tier4b, ) -from ocs_ci.ocs.version import get_ocs_version from ocs_ci.utility.utils import get_latest_release_version - +from ocs_ci.utility.version import get_ocs_version_from_csv logger = logging.getLogger(__name__) @@ -50,7 +49,7 @@ def test_remove_orphan_clients_resources( 4. Verify the storage client and it's resources were removed from Provider. """ cluster_name = get_random_cluster_name() - odf_version = get_ocs_version() + odf_version = get_ocs_version_from_csv() ocp_version = get_latest_release_version() hosted_clusters_conf_on_provider = { "ENV_DATA": { From f5c2775bfbe06cb73c5917c09a7609dc06b1df41 Mon Sep 17 00:00:00 2001 From: Daniel Osypenko Date: Sun, 16 Jun 2024 18:41:19 +0300 Subject: [PATCH 05/29] 0.4 Signed-off-by: Daniel Osypenko --- .../system_test/multicluster/test_orphan_clients.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/cross_functional/system_test/multicluster/test_orphan_clients.py b/tests/cross_functional/system_test/multicluster/test_orphan_clients.py index 4a8b97975e2..13087e71b08 100644 --- a/tests/cross_functional/system_test/multicluster/test_orphan_clients.py +++ b/tests/cross_functional/system_test/multicluster/test_orphan_clients.py @@ -49,7 +49,7 @@ def test_remove_orphan_clients_resources( 4. Verify the storage client and it's resources were removed from Provider. 
""" cluster_name = get_random_cluster_name() - odf_version = get_ocs_version_from_csv() + odf_version = str(get_ocs_version_from_csv()).split(".")[0] ocp_version = get_latest_release_version() hosted_clusters_conf_on_provider = { "ENV_DATA": { From 382fa5ad7ac8c30270fa74aee9b319f1f16c1d6c Mon Sep 17 00:00:00 2001 From: Daniel Osypenko Date: Sun, 16 Jun 2024 18:58:42 +0300 Subject: [PATCH 06/29] 0.5 Signed-off-by: Daniel Osypenko --- .../system_test/multicluster/test_orphan_clients.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/cross_functional/system_test/multicluster/test_orphan_clients.py b/tests/cross_functional/system_test/multicluster/test_orphan_clients.py index 13087e71b08..7eadc160f36 100644 --- a/tests/cross_functional/system_test/multicluster/test_orphan_clients.py +++ b/tests/cross_functional/system_test/multicluster/test_orphan_clients.py @@ -1,4 +1,5 @@ import logging +from typing import re import pytest @@ -49,7 +50,10 @@ def test_remove_orphan_clients_resources( 4. Verify the storage client and it's resources were removed from Provider. """ cluster_name = get_random_cluster_name() - odf_version = str(get_ocs_version_from_csv()).split(".")[0] + odf_version = re.sub(r"\.stable$", "", str(get_ocs_version_from_csv())) + + logger.info(f">>>> version \n{odf_version}\n <<<<") + ocp_version = get_latest_release_version() hosted_clusters_conf_on_provider = { "ENV_DATA": { From 37b02fc6de84d3bdc3f286302d08563b59f88747 Mon Sep 17 00:00:00 2001 From: Daniel Osypenko Date: Sun, 16 Jun 2024 19:04:07 +0300 Subject: [PATCH 07/29] 0.5 Signed-off-by: Daniel Osypenko --- .../system_test/multicluster/test_orphan_clients.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/cross_functional/system_test/multicluster/test_orphan_clients.py b/tests/cross_functional/system_test/multicluster/test_orphan_clients.py index 7eadc160f36..f379cab17ce 100644 --- a/tests/cross_functional/system_test/multicluster/test_orphan_clients.py +++ b/tests/cross_functional/system_test/multicluster/test_orphan_clients.py @@ -1,5 +1,4 @@ import logging -from typing import re import pytest @@ -50,7 +49,7 @@ def test_remove_orphan_clients_resources( 4. Verify the storage client and it's resources were removed from Provider. 
""" cluster_name = get_random_cluster_name() - odf_version = re.sub(r"\.stable$", "", str(get_ocs_version_from_csv())) + odf_version = str(get_ocs_version_from_csv()).replace(".stable", "") logger.info(f">>>> version \n{odf_version}\n <<<<") From 1b59a66c7c9af7ad33d16bae16a9ce5802d6b153 Mon Sep 17 00:00:00 2001 From: Daniel Osypenko Date: Sun, 16 Jun 2024 19:29:06 +0300 Subject: [PATCH 08/29] 0.51 Signed-off-by: Daniel Osypenko --- tests/cross_functional/conftest.py | 13 ++++++------- .../system_test/multicluster/test_orphan_clients.py | 2 -- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/tests/cross_functional/conftest.py b/tests/cross_functional/conftest.py index a0b5d1ce974..7b72e858d1a 100644 --- a/tests/cross_functional/conftest.py +++ b/tests/cross_functional/conftest.py @@ -9,7 +9,6 @@ import yaml -# from ocs_ci.deployment.hosted_cluster import HostedClients from ocs_ci.utility import version from ocs_ci.utility.retry import retry from ocs_ci.framework import config, Config @@ -1412,12 +1411,12 @@ def factory(hosted_cluster_conf_on_provider): for cluster_name in cluster_names: cluster_config = Config() - with open( - os.path.join( - os.path.dirname(os.path.abspath(__file__)), - f"conf/deployment/fusion_hci_pc/hypershift_client_bm_{worker_nodes_number}w.yaml", - ) - ) as file_stream: + curr_dir = os.path.dirname(os.path.abspath(__file__)) + rel_path = f"conf/deployment/fusion_hci_pc/hypershift_client_bm_{worker_nodes_number}w.yaml" + full_path = os.path.join(curr_dir, rel_path) + if not os.path.exists(full_path): + raise FileNotFoundError(f"File {full_path} not found") + with open(full_path) as file_stream: def_client_config_dict = { k: (v if v is not None else {}) for (k, v) in yaml.safe_load(file_stream).items() diff --git a/tests/cross_functional/system_test/multicluster/test_orphan_clients.py b/tests/cross_functional/system_test/multicluster/test_orphan_clients.py index f379cab17ce..35abf581183 100644 --- a/tests/cross_functional/system_test/multicluster/test_orphan_clients.py +++ b/tests/cross_functional/system_test/multicluster/test_orphan_clients.py @@ -51,8 +51,6 @@ def test_remove_orphan_clients_resources( cluster_name = get_random_cluster_name() odf_version = str(get_ocs_version_from_csv()).replace(".stable", "") - logger.info(f">>>> version \n{odf_version}\n <<<<") - ocp_version = get_latest_release_version() hosted_clusters_conf_on_provider = { "ENV_DATA": { From 02f02c3a3aabb210295a892a48f859553af4c25f Mon Sep 17 00:00:00 2001 From: Daniel Osypenko Date: Sun, 16 Jun 2024 19:45:29 +0300 Subject: [PATCH 09/29] 0.52 Signed-off-by: Daniel Osypenko --- ocs_ci/ocs/constants.py | 2 ++ tests/cross_functional/conftest.py | 17 +++++++++-------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/ocs_ci/ocs/constants.py b/ocs_ci/ocs/constants.py index 5f19f5fe1a7..2b677bf6de3 100644 --- a/ocs_ci/ocs/constants.py +++ b/ocs_ci/ocs/constants.py @@ -17,6 +17,8 @@ # Directories TOP_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) CONF_DIR = os.path.join(TOP_DIR, "conf") +DEPLOYMENT_CONF_DIR = os.path.join(CONF_DIR, "deployment") +FUSION_CONF_DIR = os.path.join(DEPLOYMENT_CONF_DIR, "fusion_hci_pc") FRAMEWORK_CONF_DIR = os.path.join(TOP_DIR, "ocs_ci", "framework", "conf") OCP_VERSION_CONF_DIR = os.path.join(FRAMEWORK_CONF_DIR, "ocp_version") OCS_VERSION_CONF_DIR = os.path.join(FRAMEWORK_CONF_DIR, "ocs_version") diff --git a/tests/cross_functional/conftest.py b/tests/cross_functional/conftest.py index 7b72e858d1a..7366c306aeb 
100644 --- a/tests/cross_functional/conftest.py +++ b/tests/cross_functional/conftest.py @@ -9,6 +9,7 @@ import yaml +from ocs_ci.ocs.constants import FUSION_CONF_DIR from ocs_ci.utility import version from ocs_ci.utility.retry import retry from ocs_ci.framework import config, Config @@ -1410,13 +1411,13 @@ def factory(hosted_cluster_conf_on_provider): config.update(hosted_cluster_conf_on_provider) for cluster_name in cluster_names: - cluster_config = Config() - curr_dir = os.path.dirname(os.path.abspath(__file__)) - rel_path = f"conf/deployment/fusion_hci_pc/hypershift_client_bm_{worker_nodes_number}w.yaml" - full_path = os.path.join(curr_dir, rel_path) - if not os.path.exists(full_path): - raise FileNotFoundError(f"File {full_path} not found") - with open(full_path) as file_stream: + + client_conf_default_dir = os.path.join( + FUSION_CONF_DIR, f"hypershift_client_bm_{worker_nodes_number}w.yaml" + ) + if not os.path.exists(client_conf_default_dir): + raise FileNotFoundError(f"File {client_conf_default_dir} not found") + with open(client_conf_default_dir) as file_stream: def_client_config_dict = { k: (v if v is not None else {}) for (k, v) in yaml.safe_load(file_stream).items() @@ -1424,7 +1425,7 @@ def factory(hosted_cluster_conf_on_provider): def_client_config_dict.get("ENV_DATA").update( {"cluster_name": cluster_name} ) - + cluster_config = Config() cluster_config.update(def_client_config_dict) logger.info( "Inserting new hosted cluster config to Multicluster Config " From 2ba52b45444b979e84c423ccdea6b1e342ac9cee Mon Sep 17 00:00:00 2001 From: Daniel Osypenko Date: Sun, 16 Jun 2024 19:56:17 +0300 Subject: [PATCH 10/29] 0.53 Signed-off-by: Daniel Osypenko --- tests/cross_functional/conftest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/cross_functional/conftest.py b/tests/cross_functional/conftest.py index 7366c306aeb..7889fe7b555 100644 --- a/tests/cross_functional/conftest.py +++ b/tests/cross_functional/conftest.py @@ -1429,7 +1429,7 @@ def factory(hosted_cluster_conf_on_provider): cluster_config.update(def_client_config_dict) logger.info( "Inserting new hosted cluster config to Multicluster Config " - f"\n{json.dumps(cluster_config, indent=4, cls=CustomJSONEncoder)}" + f"\n{json.dumps(vars(cluster_config), indent=4, cls=CustomJSONEncoder)}" ) config.insert_cluster_config(config.nclusters, cluster_config) From c94342f6b627c88ee1604fdb04b58014f46c7a7f Mon Sep 17 00:00:00 2001 From: Daniel Osypenko Date: Sun, 16 Jun 2024 21:27:08 +0300 Subject: [PATCH 11/29] 1.0 Signed-off-by: Daniel Osypenko --- ocs_ci/deployment/hosted_cluster.py | 22 ++++- tests/cross_functional/conftest.py | 15 ++- .../multicluster/test_orphan_clients.py | 98 ++++++++++++++++++- 3 files changed, 124 insertions(+), 11 deletions(-) diff --git a/ocs_ci/deployment/hosted_cluster.py b/ocs_ci/deployment/hosted_cluster.py index cfeae8f1328..c6d3f9cda6b 100644 --- a/ocs_ci/deployment/hosted_cluster.py +++ b/ocs_ci/deployment/hosted_cluster.py @@ -51,6 +51,7 @@ class HostedClients(HyperShiftBase): def __init__(self): HyperShiftBase.__init__(self) + self.kubeconfig_paths = None if not config.ENV_DATA.get("clusters"): raise ValueError( "No 'clusters': '{: }' set to ENV_DATA" @@ -312,12 +313,27 @@ def download_hosted_clusters_kubeconfig_files(self): if not (self.hcp_binary_exists() and self.hypershift_binary_exists()): self.download_hcp_binary_with_podman() - kubeconfig_paths = [] for name in config.ENV_DATA.get("clusters").keys(): path = 
config.ENV_DATA.get("clusters").get(name).get("hosted_cluster_path") - kubeconfig_paths.append(self.download_hosted_cluster_kubeconfig(name, path)) + self.kubeconfig_paths.append( + self.download_hosted_cluster_kubeconfig(name, path) + ) - return kubeconfig_paths + return self.kubeconfig_paths + + def get_kubeconfig_path(self, cluster_name): + """ + Get the kubeconfig path for the cluster + + Args: + cluster_name: str: Name of the cluster + Returns: + str: Path to the kubeconfig file + """ + for kubeconfig_path in self.kubeconfig_paths: + if cluster_name in kubeconfig_path: + return kubeconfig_path + return None def deploy_multiple_odf_clients(self): """ diff --git a/tests/cross_functional/conftest.py b/tests/cross_functional/conftest.py index 7889fe7b555..36d673f7b09 100644 --- a/tests/cross_functional/conftest.py +++ b/tests/cross_functional/conftest.py @@ -9,6 +9,7 @@ import yaml +from ocs_ci.deployment.hosted_cluster import HostedClients from ocs_ci.ocs.constants import FUSION_CONF_DIR from ocs_ci.utility import version from ocs_ci.utility.retry import retry @@ -1393,7 +1394,7 @@ def factory(hosted_cluster_conf_on_provider): assert worker_nodes_number, "Worker nodes number is not set" logger.info( - "Creating a hosted clusters with following deployment config: %s", + "Creating a hosted clusters with following deployment config: \n%s", json.dumps( hosted_cluster_conf_on_provider, indent=4, cls=CustomJSONEncoder ), @@ -1406,8 +1407,8 @@ def factory(hosted_cluster_conf_on_provider): cluster_names = list( hosted_cluster_conf_on_provider.get("ENV_DATA").get("clusters").keys() ) - # HostedClients().do_deploy(cluster_names) - + hosted_clients_obj = HostedClients() + hosted_clients_obj.do_deploy(cluster_names) config.update(hosted_cluster_conf_on_provider) for cluster_name in cluster_names: @@ -1425,9 +1426,15 @@ def factory(hosted_cluster_conf_on_provider): def_client_config_dict.get("ENV_DATA").update( {"cluster_name": cluster_name} ) + kubeconfig_path = hosted_clients_obj.get_kubeconfig_path(cluster_names) + logger.info(f"Kubeconfig path: {kubeconfig_path}") + def_client_config_dict.get("RUN").update( + {"kubeconfig": kubeconfig_path} + ) cluster_config = Config() cluster_config.update(def_client_config_dict) - logger.info( + + logger.debug( "Inserting new hosted cluster config to Multicluster Config " f"\n{json.dumps(vars(cluster_config), indent=4, cls=CustomJSONEncoder)}" ) diff --git a/tests/cross_functional/system_test/multicluster/test_orphan_clients.py b/tests/cross_functional/system_test/multicluster/test_orphan_clients.py index 35abf581183..b7fdf00f841 100644 --- a/tests/cross_functional/system_test/multicluster/test_orphan_clients.py +++ b/tests/cross_functional/system_test/multicluster/test_orphan_clients.py @@ -4,11 +4,14 @@ from ocs_ci.deployment.helpers.hypershift_base import ( get_random_cluster_name, + HyperShiftBase, ) from ocs_ci.framework import config as ocsci_config +from ocs_ci.framework.logger_helper import log_step from ocs_ci.framework.pytest_customization.marks import ( tier4b, ) +from ocs_ci.ocs import constants from ocs_ci.utility.utils import get_latest_release_version from ocs_ci.utility.version import get_ocs_version_from_csv @@ -37,7 +40,11 @@ class TestStorageClientRemoval(object): @tier4b def test_remove_orphan_clients_resources( - self, create_hypershift_clusters, return_to_original_context + self, + create_hypershift_clusters, + pvc_factory, + pod_factory, + return_to_original_context, ): """ This test is to remove the orphaned storage client resources @@ 
-48,6 +55,8 @@ def test_remove_orphan_clients_resources( 3. Remove the storage client with `hcp` command. 4. Verify the storage client and it's resources were removed from Provider. """ + + log_step("Create hosted client") cluster_name = get_random_cluster_name() odf_version = str(get_ocs_version_from_csv()).replace(".stable", "") @@ -74,10 +83,91 @@ def test_remove_orphan_clients_resources( original_cluster_index = ocsci_config.cluster_ctx.MULTICLUSTER[ "multicluster_index" ] - logger.info(f"Original cluster index: {original_cluster_index}") + log_step( + "Switch to the hosted cluster. Add block and cephfs resources and data" + ) ocsci_config.switch_to_cluster_by_name(cluster_name) - logger.info( - f"Switched to cluster with index {ocsci_config.cluster_ctx.MULTICLUSTER['multicluster_index']}" + modes = [ + ( + constants.CEPHBLOCKPOOL, + constants.ACCESS_MODE_RWO, + constants.VOLUME_MODE_FILESYSTEM, + ), + ( + constants.CEPHFILESYSTEM, + constants.ACCESS_MODE_RWO, + constants.VOLUME_MODE_FILESYSTEM, + ), + ( + constants.CEPHFILESYSTEM, + constants.ACCESS_MODE_RWX, + constants.VOLUME_MODE_FILESYSTEM, + ), + ( + constants.CEPHBLOCKPOOL, + constants.ACCESS_MODE_RWO, + constants.VOLUME_MODE_BLOCK, + ), + ( + constants.CEPHBLOCKPOOL, + constants.ACCESS_MODE_RWX, + constants.VOLUME_MODE_BLOCK, + ), + ] + self.pod_objs = list() + self.pvc_objs = list() + for mode in modes: + pvc_obj = pvc_factory( + interface=mode[0], + access_mode=mode[1], + size=2, + volume_mode=mode[2], + status=constants.STATUS_BOUND, + ) + logger.info( + f"Created new pvc {pvc_obj.name} sc_name={mode[0]} size=2Gi, " + f"access_mode={mode[1]}, volume_mode={mode[2]}" + ) + self.pvc_objs.append(pvc_obj) + if mode[2] == constants.VOLUME_MODE_BLOCK: + pod_dict_path = constants.CSI_RBD_RAW_BLOCK_POD_YAML + storage_type = constants.WORKLOAD_STORAGE_TYPE_BLOCK + raw_block_pv = True + else: + pod_dict_path = constants.NGINX_POD_YAML + storage_type = constants.WORKLOAD_STORAGE_TYPE_FS + raw_block_pv = False + logger.info( + f"Created new pod sc_name={mode[0]} size=2Gi, access_mode={mode[1]}, volume_mode={mode[2]}" + ) + pod_obj = pod_factory( + interface=mode[0], + pvc=pvc_obj, + status=constants.STATUS_RUNNING, + pod_dict_path=pod_dict_path, + raw_block_pv=raw_block_pv, + ) + pod_obj.run_io( + storage_type=storage_type, + size="1GB", + verify=True, + ) + self.pod_objs.append(pod_obj) + + for pod_obj in self.pod_objs: + fio_result = pod_obj.get_fio_results() + logger.info("IOPs after FIO:") + reads = fio_result.get("jobs")[0].get("read").get("iops") + writes = fio_result.get("jobs")[0].get("write").get("iops") + logger.info(f"Read: {reads}") + logger.info(f"Write: {writes}") + + log_step("Remove the storage client with `hcp` command") + ocsci_config.switch_ctx(original_cluster_index) + HyperShiftBase().destroy_kubevirt_cluster(cluster_name) + + log_step( + "Verify the storage client and it's resources were removed from Provider" ) From 2e765248aa5ff8cd3744072ce1ea84e9c49fbd0c Mon Sep 17 00:00:00 2001 From: Daniel Osypenko Date: Mon, 17 Jun 2024 09:43:20 +0300 Subject: [PATCH 12/29] 1.1 Signed-off-by: Daniel Osypenko --- ocs_ci/deployment/hosted_cluster.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/ocs_ci/deployment/hosted_cluster.py b/ocs_ci/deployment/hosted_cluster.py index c6d3f9cda6b..8e3028642a3 100644 --- a/ocs_ci/deployment/hosted_cluster.py +++ b/ocs_ci/deployment/hosted_cluster.py @@ -52,10 +52,6 @@ class HostedClients(HyperShiftBase): def __init__(self): HyperShiftBase.__init__(self) self.kubeconfig_paths = None - 
if not config.ENV_DATA.get("clusters"): - raise ValueError( - "No 'clusters': '{: }' set to ENV_DATA" - ) def do_deploy(self, cluster_names=None): """ From df96d1d5868a17bce703796255c682f9e4a65dd0 Mon Sep 17 00:00:00 2001 From: Daniel Osypenko Date: Mon, 17 Jun 2024 10:10:44 +0300 Subject: [PATCH 13/29] 1.2 Signed-off-by: Daniel Osypenko --- tests/cross_functional/conftest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/cross_functional/conftest.py b/tests/cross_functional/conftest.py index 36d673f7b09..99553ea163f 100644 --- a/tests/cross_functional/conftest.py +++ b/tests/cross_functional/conftest.py @@ -1399,6 +1399,7 @@ def factory(hosted_cluster_conf_on_provider): hosted_cluster_conf_on_provider, indent=4, cls=CustomJSONEncoder ), ) + config.update(hosted_cluster_conf_on_provider) # During the initial deployment phase, we always deploy Hosting and specific Hosted clusters. # To distinguish between clusters intended for deployment on deployment CI stage and those intended for @@ -1409,7 +1410,6 @@ def factory(hosted_cluster_conf_on_provider): ) hosted_clients_obj = HostedClients() hosted_clients_obj.do_deploy(cluster_names) - config.update(hosted_cluster_conf_on_provider) for cluster_name in cluster_names: From 9d22dee0bb3b445b79156030a8525fb7a2bd5014 Mon Sep 17 00:00:00 2001 From: Daniel Osypenko Date: Mon, 17 Jun 2024 14:26:14 +0300 Subject: [PATCH 14/29] 1.21 Signed-off-by: Daniel Osypenko --- .../fusion_hci_pc/provider_bm_upi_1az_rhcos_nvme_3m_3w.yaml | 2 +- ocs_ci/deployment/hosted_cluster.py | 2 +- ocs_ci/utility/utils.py | 4 ++++ .../system_test/multicluster/test_orphan_clients.py | 2 +- 4 files changed, 7 insertions(+), 3 deletions(-) diff --git a/conf/deployment/fusion_hci_pc/provider_bm_upi_1az_rhcos_nvme_3m_3w.yaml b/conf/deployment/fusion_hci_pc/provider_bm_upi_1az_rhcos_nvme_3m_3w.yaml index 83290b384fd..681fe7e6e78 100644 --- a/conf/deployment/fusion_hci_pc/provider_bm_upi_1az_rhcos_nvme_3m_3w.yaml +++ b/conf/deployment/fusion_hci_pc/provider_bm_upi_1az_rhcos_nvme_3m_3w.yaml @@ -6,7 +6,7 @@ ENV_DATA: platform: 'hci_baremetal' cluster_type: 'provider' cluster_namespace: "openshift-storage" - deployment_type: 'upi' + deployment_type: 'ai' worker_replicas: 3 master_replicas: 3 mon_type: 'hostpath' diff --git a/ocs_ci/deployment/hosted_cluster.py b/ocs_ci/deployment/hosted_cluster.py index 8e3028642a3..752c152cd67 100644 --- a/ocs_ci/deployment/hosted_cluster.py +++ b/ocs_ci/deployment/hosted_cluster.py @@ -51,7 +51,7 @@ class HostedClients(HyperShiftBase): def __init__(self): HyperShiftBase.__init__(self) - self.kubeconfig_paths = None + self.kubeconfig_paths = [] def do_deploy(self, cluster_names=None): """ diff --git a/ocs_ci/utility/utils.py b/ocs_ci/utility/utils.py index dfb63719e47..4e87916fb3b 100644 --- a/ocs_ci/utility/utils.py +++ b/ocs_ci/utility/utils.py @@ -5044,6 +5044,10 @@ def sum_of_two_storage_sizes(storage_size1, storage_size2, convert_size=1024): class CustomJSONEncoder(json.JSONEncoder): + """ + Custom JSON encoder to handle set objects + """ + def default(self, obj): if isinstance(obj, set): return list(obj) diff --git a/tests/cross_functional/system_test/multicluster/test_orphan_clients.py b/tests/cross_functional/system_test/multicluster/test_orphan_clients.py index b7fdf00f841..3992bc8d872 100644 --- a/tests/cross_functional/system_test/multicluster/test_orphan_clients.py +++ b/tests/cross_functional/system_test/multicluster/test_orphan_clients.py @@ -57,7 +57,7 @@ def test_remove_orphan_clients_resources( """ 
log_step("Create hosted client") - cluster_name = get_random_cluster_name() + cluster_name = "hcp" + get_random_cluster_name() odf_version = str(get_ocs_version_from_csv()).replace(".stable", "") ocp_version = get_latest_release_version() From e7f44dd746c56163ab8aa3771e5ef02656e70566 Mon Sep 17 00:00:00 2001 From: Daniel Osypenko Date: Mon, 17 Jun 2024 15:53:07 +0300 Subject: [PATCH 15/29] 1.22 Signed-off-by: Daniel Osypenko --- ocs_ci/deployment/hosted_cluster.py | 6 +----- tests/cross_functional/conftest.py | 2 +- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/ocs_ci/deployment/hosted_cluster.py b/ocs_ci/deployment/hosted_cluster.py index 752c152cd67..13909ac326e 100644 --- a/ocs_ci/deployment/hosted_cluster.py +++ b/ocs_ci/deployment/hosted_cluster.py @@ -336,10 +336,8 @@ def deploy_multiple_odf_clients(self): Deploy multiple ODF clients on hosted OCP clusters. Method tries to deploy ODF client on all hosted OCP clusters If ODF was already deployed on some of the clusters, it will be skipped for those clusters. - Returns: - list: the list of kubeconfig paths for all hosted OCP clusters """ - kubeconfig_paths = self.update_hcp_binary() + self.update_hcp_binary() hosted_cluster_names = get_hosted_cluster_names() @@ -348,8 +346,6 @@ def deploy_multiple_odf_clients(self): hosted_odf = HostedODF(cluster_name) hosted_odf.do_deploy() - return kubeconfig_paths - class HypershiftHostedOCP(HyperShiftBase, MetalLBInstaller, CNVInstaller, Deployment): def __init__(self, name): diff --git a/tests/cross_functional/conftest.py b/tests/cross_functional/conftest.py index 99553ea163f..204eedd8132 100644 --- a/tests/cross_functional/conftest.py +++ b/tests/cross_functional/conftest.py @@ -1426,7 +1426,7 @@ def factory(hosted_cluster_conf_on_provider): def_client_config_dict.get("ENV_DATA").update( {"cluster_name": cluster_name} ) - kubeconfig_path = hosted_clients_obj.get_kubeconfig_path(cluster_names) + kubeconfig_path = hosted_clients_obj.get_kubeconfig_path(cluster_name) logger.info(f"Kubeconfig path: {kubeconfig_path}") def_client_config_dict.get("RUN").update( {"kubeconfig": kubeconfig_path} From ded0e02cd87c9a1ee22719c51b891b39dd37c01d Mon Sep 17 00:00:00 2001 From: Daniel Osypenko Date: Mon, 17 Jun 2024 18:16:56 +0300 Subject: [PATCH 16/29] 0.1 Signed-off-by: Daniel Osypenko --- ocs_ci/deployment/helpers/hypershift_base.py | 6 +- tests/cross_functional/conftest.py | 70 +++++-- .../multicluster/test_orphan_clients.py | 173 ------------------ .../test_provider_create_hosted_cluster.py | 40 ++++ 4 files changed, 98 insertions(+), 191 deletions(-) delete mode 100644 tests/cross_functional/system_test/multicluster/test_orphan_clients.py diff --git a/ocs_ci/deployment/helpers/hypershift_base.py b/ocs_ci/deployment/helpers/hypershift_base.py index e217913f2cf..801066cbe16 100644 --- a/ocs_ci/deployment/helpers/hypershift_base.py +++ b/ocs_ci/deployment/helpers/hypershift_base.py @@ -81,7 +81,9 @@ def get_random_cluster_name(): random_letters = "".join( random.choice(string.ascii_lowercase) for _ in range(3) ) - cluster_name = hcp_version + "-" + bm_name[match.start() :] + random_letters + cluster_name = ( + "hcp" + hcp_version + "-" + bm_name[match.start() :] + random_letters + ) else: raise ValueError("Cluster name not found in the env data") return cluster_name @@ -607,7 +609,7 @@ def destroy_kubevirt_cluster(self, name): Args: name (str): Name of the cluster """ - destroy_timeout_min = 10 + destroy_timeout_min = 15 logger.info( f"Destroying HyperShift hosted cluster {name}. 
Timeout: {destroy_timeout_min} min" ) diff --git a/tests/cross_functional/conftest.py b/tests/cross_functional/conftest.py index 204eedd8132..40fa2f3809e 100644 --- a/tests/cross_functional/conftest.py +++ b/tests/cross_functional/conftest.py @@ -9,6 +9,7 @@ import yaml +from ocs_ci.deployment.helpers.hypershift_base import HyperShiftBase from ocs_ci.deployment.hosted_cluster import HostedClients from ocs_ci.ocs.constants import FUSION_CONF_DIR from ocs_ci.utility import version @@ -1363,12 +1364,12 @@ def create_hypershift_clusters(): """ Create hosted hyperhift clusters. - Here we reach cluster deployment configuration that was set in the Test. With this configuration we + Here we create cluster deployment configuration that was set in the Test. With this configuration we create a hosted cluster. After successful creation of the hosted cluster, we update the Multicluster Config, adding the new cluster configuration to the list of the clusters. Now we can operate with new and old clusters switching the context of Multicluster Config - Expects following dictionary config to be set in the Test (might be updated, check the documetation): + Following arguments are necessary to build the hosted cluster configuration: ENV_DATA: clusters: : @@ -1383,16 +1384,34 @@ def create_hypershift_clusters(): """ - def factory(hosted_cluster_conf_on_provider): + def factory( + cluster_names, ocp_version, odf_version, setup_storage_client, nodepool_replicas + ): + """ + Factory function implementing the fixture - env_data = hosted_cluster_conf_on_provider.get("ENV_DATA", {}) - clusters = env_data.get("clusters", {}) - first_cluster_name = next(iter(clusters), None) - worker_nodes_number = clusters.get(first_cluster_name, {}).get( - "nodepool_replicas", None - ) + Args: + cluster_names (list): List of cluster names + ocp_version (str): OCP version + odf_version (str): ODF version + setup_storage_client (bool): Setup storage client + nodepool_replicas (int): Nodepool replicas; supported values are 2,3 + + """ + hosted_cluster_conf_on_provider = {"ENV_DATA": {"clusters": {}}} + + for cluster_name in cluster_names: + hosted_cluster_conf_on_provider["ENV_DATA"]["clusters"][cluster_name] = { + "hosted_cluster_path": f"~/clusters/{cluster_name}/openshift-cluster-dir", + "ocp_version": ocp_version, + "cpu_cores_per_hosted_cluster": 8, + "memory_per_hosted_cluster": "12Gi", + "hosted_odf_registry": "quay.io/rhceph-dev/ocs-registry", + "hosted_odf_version": odf_version, + "setup_storage_client": setup_storage_client, + "nodepool_replicas": nodepool_replicas, + } - assert worker_nodes_number, "Worker nodes number is not set" logger.info( "Creating a hosted clusters with following deployment config: \n%s", json.dumps( @@ -1405,16 +1424,14 @@ def factory(hosted_cluster_conf_on_provider): # To distinguish between clusters intended for deployment on deployment CI stage and those intended for # deployment on the Test stage, we pass the names of the clusters to be deployed to the # HostedClients().do_deploy() method. 
- cluster_names = list( - hosted_cluster_conf_on_provider.get("ENV_DATA").get("clusters").keys() - ) hosted_clients_obj = HostedClients() - hosted_clients_obj.do_deploy(cluster_names) + deployed_hosted_cluster_objects = hosted_clients_obj.do_deploy(cluster_names) + deployed_clusters = [obj.name for obj in deployed_hosted_cluster_objects] - for cluster_name in cluster_names: + for cluster_name in deployed_clusters: client_conf_default_dir = os.path.join( - FUSION_CONF_DIR, f"hypershift_client_bm_{worker_nodes_number}w.yaml" + FUSION_CONF_DIR, f"hypershift_client_bm_{nodepool_replicas}w.yaml" ) if not os.path.exists(client_conf_default_dir): raise FileNotFoundError(f"File {client_conf_default_dir} not found") @@ -1441,3 +1458,24 @@ def factory(hosted_cluster_conf_on_provider): config.insert_cluster_config(config.nclusters, cluster_config) return factory + + +@pytest.fixture() +def destroy_hosted_cluster(): + def factory(cluster_name): + config.switch_to_provider() + logger.info("Destroying hosted cluster. OCS related leftovers are expected") + hypershift_base_obj = HyperShiftBase() + + if not hypershift_base_obj.hcp_binary_exists(): + hypershift_base_obj.update_hcp_binary() + + destroy_res = HyperShiftBase().destroy_kubevirt_cluster(cluster_name) + + if destroy_res: + logger.info("Removing cluster from Multicluster Config") + config.remove_cluster_by_name(cluster_name) + + return destroy_res + + return factory diff --git a/tests/cross_functional/system_test/multicluster/test_orphan_clients.py b/tests/cross_functional/system_test/multicluster/test_orphan_clients.py deleted file mode 100644 index 3992bc8d872..00000000000 --- a/tests/cross_functional/system_test/multicluster/test_orphan_clients.py +++ /dev/null @@ -1,173 +0,0 @@ -import logging - -import pytest - -from ocs_ci.deployment.helpers.hypershift_base import ( - get_random_cluster_name, - HyperShiftBase, -) -from ocs_ci.framework import config as ocsci_config -from ocs_ci.framework.logger_helper import log_step -from ocs_ci.framework.pytest_customization.marks import ( - tier4b, -) -from ocs_ci.ocs import constants -from ocs_ci.utility.utils import get_latest_release_version -from ocs_ci.utility.version import get_ocs_version_from_csv - -logger = logging.getLogger(__name__) - - -@pytest.fixture -def return_to_original_context(request): - """ - Make sure that original context is restored after the test. - """ - original_cluster = ocsci_config.cluster_ctx.MULTICLUSTER["multicluster_index"] - - def finalizer(): - logger.info(f"Switching back to original cluster with index {original_cluster}") - ocsci_config.switch_ctx(original_cluster) - - request.addfinalizer(finalizer) - yield - - -class TestStorageClientRemoval(object): - """ - Test storage client removal - """ - - @tier4b - def test_remove_orphan_clients_resources( - self, - create_hypershift_clusters, - pvc_factory, - pod_factory, - return_to_original_context, - ): - """ - This test is to remove the orphaned storage client resources - - Steps: - 1. Create hosted client. - 2. Add block and cephfs resources and data on hosted client. - 3. Remove the storage client with `hcp` command. - 4. Verify the storage client and it's resources were removed from Provider. 
- """ - - log_step("Create hosted client") - cluster_name = "hcp" + get_random_cluster_name() - odf_version = str(get_ocs_version_from_csv()).replace(".stable", "") - - ocp_version = get_latest_release_version() - hosted_clusters_conf_on_provider = { - "ENV_DATA": { - "clusters": { - cluster_name: { - "hosted_cluster_path": f"~/clusters/{cluster_name}/openshift-cluster-dir", - "ocp_version": ocp_version, - "cpu_cores_per_hosted_cluster": 8, - "memory_per_hosted_cluster": "12Gi", - "hosted_odf_registry": "quay.io/rhceph-dev/ocs-registry", - "hosted_odf_version": odf_version, - "setup_storage_client": True, - "nodepool_replicas": 2, - } - } - } - } - - create_hypershift_clusters(hosted_clusters_conf_on_provider) - - original_cluster_index = ocsci_config.cluster_ctx.MULTICLUSTER[ - "multicluster_index" - ] - - log_step( - "Switch to the hosted cluster. Add block and cephfs resources and data" - ) - ocsci_config.switch_to_cluster_by_name(cluster_name) - - modes = [ - ( - constants.CEPHBLOCKPOOL, - constants.ACCESS_MODE_RWO, - constants.VOLUME_MODE_FILESYSTEM, - ), - ( - constants.CEPHFILESYSTEM, - constants.ACCESS_MODE_RWO, - constants.VOLUME_MODE_FILESYSTEM, - ), - ( - constants.CEPHFILESYSTEM, - constants.ACCESS_MODE_RWX, - constants.VOLUME_MODE_FILESYSTEM, - ), - ( - constants.CEPHBLOCKPOOL, - constants.ACCESS_MODE_RWO, - constants.VOLUME_MODE_BLOCK, - ), - ( - constants.CEPHBLOCKPOOL, - constants.ACCESS_MODE_RWX, - constants.VOLUME_MODE_BLOCK, - ), - ] - self.pod_objs = list() - self.pvc_objs = list() - for mode in modes: - pvc_obj = pvc_factory( - interface=mode[0], - access_mode=mode[1], - size=2, - volume_mode=mode[2], - status=constants.STATUS_BOUND, - ) - logger.info( - f"Created new pvc {pvc_obj.name} sc_name={mode[0]} size=2Gi, " - f"access_mode={mode[1]}, volume_mode={mode[2]}" - ) - self.pvc_objs.append(pvc_obj) - if mode[2] == constants.VOLUME_MODE_BLOCK: - pod_dict_path = constants.CSI_RBD_RAW_BLOCK_POD_YAML - storage_type = constants.WORKLOAD_STORAGE_TYPE_BLOCK - raw_block_pv = True - else: - pod_dict_path = constants.NGINX_POD_YAML - storage_type = constants.WORKLOAD_STORAGE_TYPE_FS - raw_block_pv = False - logger.info( - f"Created new pod sc_name={mode[0]} size=2Gi, access_mode={mode[1]}, volume_mode={mode[2]}" - ) - pod_obj = pod_factory( - interface=mode[0], - pvc=pvc_obj, - status=constants.STATUS_RUNNING, - pod_dict_path=pod_dict_path, - raw_block_pv=raw_block_pv, - ) - pod_obj.run_io( - storage_type=storage_type, - size="1GB", - verify=True, - ) - self.pod_objs.append(pod_obj) - - for pod_obj in self.pod_objs: - fio_result = pod_obj.get_fio_results() - logger.info("IOPs after FIO:") - reads = fio_result.get("jobs")[0].get("read").get("iops") - writes = fio_result.get("jobs")[0].get("write").get("iops") - logger.info(f"Read: {reads}") - logger.info(f"Write: {writes}") - - log_step("Remove the storage client with `hcp` command") - ocsci_config.switch_ctx(original_cluster_index) - HyperShiftBase().destroy_kubevirt_cluster(cluster_name) - - log_step( - "Verify the storage client and it's resources were removed from Provider" - ) diff --git a/tests/libtest/test_provider_create_hosted_cluster.py b/tests/libtest/test_provider_create_hosted_cluster.py index 39ce459a29e..fbe1fa8c9cd 100644 --- a/tests/libtest/test_provider_create_hosted_cluster.py +++ b/tests/libtest/test_provider_create_hosted_cluster.py @@ -4,6 +4,7 @@ from ocs_ci.deployment.deployment import validate_acm_hub_install, Deployment from ocs_ci.deployment.helpers.hypershift_base import ( get_hosted_cluster_names, + 
get_random_cluster_name, ) from ocs_ci.deployment.hosted_cluster import ( HypershiftHostedOCP, @@ -11,12 +12,17 @@ HostedClients, ) from ocs_ci.framework import config +from ocs_ci.framework.logger_helper import log_step from ocs_ci.framework.pytest_customization.marks import ( hci_provider_required, libtest, purple_squad, runs_on_provider, ) +from ocs_ci.ocs.ocp import OCP +from ocs_ci.utility.utils import get_latest_release_version +from ocs_ci.utility.version import get_ocs_version_from_csv +from ocs_ci.framework import config as ocsci_config from ocs_ci.ocs import constants from ocs_ci.ocs.resources.storage_client import StorageClient from ocs_ci.helpers.helpers import ( @@ -132,6 +138,40 @@ def test_storage_client_connected(self): cluster_names = list(config.ENV_DATA["clusters"].keys()) assert HostedODF(cluster_names[-1]).get_storage_client_status() == "Connected" + @runs_on_provider + @hci_provider_required + def test_create_hosted_cluster_with_fixture( + self, create_hypershift_clusters, destroy_hosted_cluster + ): + """ + Test create hosted cluster with fixture + """ + log_step("Create hosted client") + cluster_name = get_random_cluster_name() + odf_version = str(get_ocs_version_from_csv()).replace(".stable", "") + ocp_version = get_latest_release_version() + nodepool_replicas = 2 + + create_hypershift_clusters( + cluster_names=[cluster_name], + ocp_version=ocp_version, + odf_version=odf_version, + setup_storage_client=True, + nodepool_replicas=nodepool_replicas, + ) + + log_step("Switch to the hosted cluster") + ocsci_config.switch_to_cluster_by_name(cluster_name) + + server = OCP().exec_oc_cmd("oc whoami --show-server", out_yaml_format=False) + + assert ( + cluster_name in server + ), f"Failed to switch to cluster '{cluster_name}' and fetch data" + + log_step("Destroy hosted cluster") + assert destroy_hosted_cluster(cluster_name), "Failed to destroy hosted cluster" + @runs_on_provider @hci_provider_required def test_deploy_acm(self): From aa725a6737b813d6755d90d05b026ca970ef18e0 Mon Sep 17 00:00:00 2001 From: Daniel Osypenko Date: Mon, 17 Jun 2024 18:23:43 +0300 Subject: [PATCH 17/29] 0.2 Signed-off-by: Daniel Osypenko --- .../fusion_hci_pc/provider_bm_upi_1az_rhcos_nvme_3m_3w.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conf/deployment/fusion_hci_pc/provider_bm_upi_1az_rhcos_nvme_3m_3w.yaml b/conf/deployment/fusion_hci_pc/provider_bm_upi_1az_rhcos_nvme_3m_3w.yaml index 681fe7e6e78..83290b384fd 100644 --- a/conf/deployment/fusion_hci_pc/provider_bm_upi_1az_rhcos_nvme_3m_3w.yaml +++ b/conf/deployment/fusion_hci_pc/provider_bm_upi_1az_rhcos_nvme_3m_3w.yaml @@ -6,7 +6,7 @@ ENV_DATA: platform: 'hci_baremetal' cluster_type: 'provider' cluster_namespace: "openshift-storage" - deployment_type: 'ai' + deployment_type: 'upi' worker_replicas: 3 master_replicas: 3 mon_type: 'hostpath' From d72410bdf1759bfea75f8550d791aaa34dd75816 Mon Sep 17 00:00:00 2001 From: Daniel Osypenko Date: Mon, 17 Jun 2024 18:25:45 +0300 Subject: [PATCH 18/29] 0.21 Signed-off-by: Daniel Osypenko --- tests/libtest/test_provider_create_hosted_cluster.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/libtest/test_provider_create_hosted_cluster.py b/tests/libtest/test_provider_create_hosted_cluster.py index fbe1fa8c9cd..2f502cbde39 100644 --- a/tests/libtest/test_provider_create_hosted_cluster.py +++ b/tests/libtest/test_provider_create_hosted_cluster.py @@ -163,7 +163,9 @@ def test_create_hosted_cluster_with_fixture( log_step("Switch to the hosted cluster") 
        ocsci_config.switch_to_cluster_by_name(cluster_name)

-        server = OCP().exec_oc_cmd("oc whoami --show-server", out_yaml_format=False)
+        server = str(
+            OCP().exec_oc_cmd("oc whoami --show-server", out_yaml_format=False)
+        )

         assert (
             cluster_name in server

From bde7315de90bdfa875488ad77fcc52e3f4e8f7c7 Mon Sep 17 00:00:00 2001
From: Daniel Osypenko
Date: Mon, 17 Jun 2024 18:38:12 +0300
Subject: [PATCH 19/29] 0.22

Signed-off-by: Daniel Osypenko
---
 tests/libtest/test_provider_create_hosted_cluster.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/tests/libtest/test_provider_create_hosted_cluster.py b/tests/libtest/test_provider_create_hosted_cluster.py
index 2f502cbde39..42509600196 100644
--- a/tests/libtest/test_provider_create_hosted_cluster.py
+++ b/tests/libtest/test_provider_create_hosted_cluster.py
@@ -23,6 +23,10 @@
 from ocs_ci.utility.utils import get_latest_release_version
 from ocs_ci.utility.version import get_ocs_version_from_csv
 from ocs_ci.framework import config as ocsci_config
+from tests.cross_functional.conftest import (
+    create_hypershift_clusters,
+    destroy_hosted_cluster,
+)
 from ocs_ci.ocs import constants
 from ocs_ci.ocs.resources.storage_client import StorageClient
 from ocs_ci.helpers.helpers import (

From 2f70f5f7bd4fea297c77f8f635dfab208cd4f58c Mon Sep 17 00:00:00 2001
From: Daniel Osypenko
Date: Mon, 17 Jun 2024 18:46:45 +0300
Subject: [PATCH 20/29] 0.23

Signed-off-by: Daniel Osypenko
---
 tests/conftest.py                              | 131 ++++++++++++++++-
 tests/cross_functional/conftest.py             | 132 +-----------------
 .../test_provider_create_hosted_cluster.py     |   4 -
 3 files changed, 132 insertions(+), 135 deletions(-)

diff --git a/tests/conftest.py b/tests/conftest.py
index b02fd25226c..d3c1d1e9879 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -15,12 +15,15 @@
 from functools import partial

 import boto3
+import yaml
 from botocore.exceptions import ClientError
 import pytest
 from collections import namedtuple

 from ocs_ci.deployment import factory as dep_factory
-from ocs_ci.framework import config as ocsci_config
+from ocs_ci.deployment.helpers.hypershift_base import HyperShiftBase
+from ocs_ci.deployment.hosted_cluster import HostedClients
+from ocs_ci.framework import config as ocsci_config, Config
 import ocs_ci.framework.pytest_customization.marks
 from ocs_ci.framework.pytest_customization.marks import (
     deployment,
@@ -39,6 +42,7 @@
     craft_s3_command,
     put_bucket_policy,
 )
+from ocs_ci.ocs.constants import FUSION_CONF_DIR
 from ocs_ci.ocs.dr.dr_workload import BusyBox, BusyBox_AppSet, CnvWorkload
 from ocs_ci.ocs.exceptions import (
     CommandFailed,
@@ -147,6 +151,7 @@
     skipif_ui_not_support,
     run_cmd,
     ceph_health_check_multi_storagecluster_external,
+    CustomJSONEncoder,
 )
 from ocs_ci.helpers import helpers, dr_helpers
 from ocs_ci.helpers.helpers import (
@@ -7931,3 +7936,127 @@ def finalizer():
         request.addfinalizer(finalizer)

     return factory
+
+
+@pytest.fixture()
+def create_hypershift_clusters():
+    """
+    Create hosted hypershift clusters.
+
+    Here we create the cluster deployment configuration that is defined in the test. With this configuration we
+    create a hosted cluster. After successful creation of the hosted cluster, we update the Multicluster Config,
+    adding the new cluster configuration to the list of the clusters.
Now we can operate with new and old clusters + switching the context of Multicluster Config + + Following arguments are necessary to build the hosted cluster configuration: + ENV_DATA: + clusters: + : + hosted_cluster_path: + ocp_version: + cpu_cores_per_hosted_cluster: + memory_per_hosted_cluster: + hosted_odf_registry: + hosted_odf_version: + setup_storage_client: + nodepool_replicas: + + """ + + def factory( + cluster_names, ocp_version, odf_version, setup_storage_client, nodepool_replicas + ): + """ + Factory function implementing the fixture + + Args: + cluster_names (list): List of cluster names + ocp_version (str): OCP version + odf_version (str): ODF version + setup_storage_client (bool): Setup storage client + nodepool_replicas (int): Nodepool replicas; supported values are 2,3 + + """ + hosted_cluster_conf_on_provider = {"ENV_DATA": {"clusters": {}}} + + for cluster_name in cluster_names: + hosted_cluster_conf_on_provider["ENV_DATA"]["clusters"][cluster_name] = { + "hosted_cluster_path": f"~/clusters/{cluster_name}/openshift-cluster-dir", + "ocp_version": ocp_version, + "cpu_cores_per_hosted_cluster": 8, + "memory_per_hosted_cluster": "12Gi", + "hosted_odf_registry": "quay.io/rhceph-dev/ocs-registry", + "hosted_odf_version": odf_version, + "setup_storage_client": setup_storage_client, + "nodepool_replicas": nodepool_replicas, + } + + log.info( + "Creating a hosted clusters with following deployment config: \n%s", + json.dumps( + hosted_cluster_conf_on_provider, indent=4, cls=CustomJSONEncoder + ), + ) + ocsci_config.update(hosted_cluster_conf_on_provider) + + # During the initial deployment phase, we always deploy Hosting and specific Hosted clusters. + # To distinguish between clusters intended for deployment on deployment CI stage and those intended for + # deployment on the Test stage, we pass the names of the clusters to be deployed to the + # HostedClients().do_deploy() method. + hosted_clients_obj = HostedClients() + deployed_hosted_cluster_objects = hosted_clients_obj.do_deploy(cluster_names) + deployed_clusters = [obj.name for obj in deployed_hosted_cluster_objects] + + for cluster_name in deployed_clusters: + + client_conf_default_dir = os.path.join( + FUSION_CONF_DIR, f"hypershift_client_bm_{nodepool_replicas}w.yaml" + ) + if not os.path.exists(client_conf_default_dir): + raise FileNotFoundError(f"File {client_conf_default_dir} not found") + with open(client_conf_default_dir) as file_stream: + def_client_config_dict = { + k: (v if v is not None else {}) + for (k, v) in yaml.safe_load(file_stream).items() + } + def_client_config_dict.get("ENV_DATA").update( + {"cluster_name": cluster_name} + ) + kubeconfig_path = hosted_clients_obj.get_kubeconfig_path(cluster_name) + log.info(f"Kubeconfig path: {kubeconfig_path}") + def_client_config_dict.get("RUN").update( + {"kubeconfig": kubeconfig_path} + ) + cluster_config = Config() + cluster_config.update(def_client_config_dict) + + log.debug( + "Inserting new hosted cluster config to Multicluster Config " + f"\n{json.dumps(vars(cluster_config), indent=4, cls=CustomJSONEncoder)}" + ) + ocsci_config.insert_cluster_config( + ocsci_config.nclusters, cluster_config + ) + + return factory + + +@pytest.fixture() +def destroy_hosted_cluster(): + def factory(cluster_name): + ocsci_config.switch_to_provider() + log.info("Destroying hosted cluster. 
OCS related leftovers are expected") + hypershift_base_obj = HyperShiftBase() + + if not hypershift_base_obj.hcp_binary_exists(): + hypershift_base_obj.update_hcp_binary() + + destroy_res = HyperShiftBase().destroy_kubevirt_cluster(cluster_name) + + if destroy_res: + log.info("Removing cluster from Multicluster Config") + ocsci_config.remove_cluster_by_name(cluster_name) + + return destroy_res + + return factory diff --git a/tests/cross_functional/conftest.py b/tests/cross_functional/conftest.py index 40fa2f3809e..56bb6b2c63a 100644 --- a/tests/cross_functional/conftest.py +++ b/tests/cross_functional/conftest.py @@ -1,5 +1,4 @@ import os -import json import logging import boto3 import pytest @@ -7,14 +6,9 @@ from concurrent.futures import ThreadPoolExecutor from threading import Event -import yaml - -from ocs_ci.deployment.helpers.hypershift_base import HyperShiftBase -from ocs_ci.deployment.hosted_cluster import HostedClients -from ocs_ci.ocs.constants import FUSION_CONF_DIR from ocs_ci.utility import version from ocs_ci.utility.retry import retry -from ocs_ci.framework import config, Config +from ocs_ci.framework import config from ocs_ci.helpers.e2e_helpers import ( create_muliple_types_provider_obcs, validate_mcg_bucket_replicaton, @@ -58,7 +52,7 @@ from ocs_ci.utility.kms import is_kms_enabled -from ocs_ci.utility.utils import clone_notify, CustomJSONEncoder +from ocs_ci.utility.utils import clone_notify logger = logging.getLogger(__name__) @@ -1357,125 +1351,3 @@ def factory( return feature_setup_map return factory - - -@pytest.fixture() -def create_hypershift_clusters(): - """ - Create hosted hyperhift clusters. - - Here we create cluster deployment configuration that was set in the Test. With this configuration we - create a hosted cluster. After successful creation of the hosted cluster, we update the Multicluster Config, - adding the new cluster configuration to the list of the clusters. 
Now we can operate with new and old clusters - switching the context of Multicluster Config - - Following arguments are necessary to build the hosted cluster configuration: - ENV_DATA: - clusters: - : - hosted_cluster_path: - ocp_version: - cpu_cores_per_hosted_cluster: - memory_per_hosted_cluster: - hosted_odf_registry: - hosted_odf_version: - setup_storage_client: - nodepool_replicas: - - """ - - def factory( - cluster_names, ocp_version, odf_version, setup_storage_client, nodepool_replicas - ): - """ - Factory function implementing the fixture - - Args: - cluster_names (list): List of cluster names - ocp_version (str): OCP version - odf_version (str): ODF version - setup_storage_client (bool): Setup storage client - nodepool_replicas (int): Nodepool replicas; supported values are 2,3 - - """ - hosted_cluster_conf_on_provider = {"ENV_DATA": {"clusters": {}}} - - for cluster_name in cluster_names: - hosted_cluster_conf_on_provider["ENV_DATA"]["clusters"][cluster_name] = { - "hosted_cluster_path": f"~/clusters/{cluster_name}/openshift-cluster-dir", - "ocp_version": ocp_version, - "cpu_cores_per_hosted_cluster": 8, - "memory_per_hosted_cluster": "12Gi", - "hosted_odf_registry": "quay.io/rhceph-dev/ocs-registry", - "hosted_odf_version": odf_version, - "setup_storage_client": setup_storage_client, - "nodepool_replicas": nodepool_replicas, - } - - logger.info( - "Creating a hosted clusters with following deployment config: \n%s", - json.dumps( - hosted_cluster_conf_on_provider, indent=4, cls=CustomJSONEncoder - ), - ) - config.update(hosted_cluster_conf_on_provider) - - # During the initial deployment phase, we always deploy Hosting and specific Hosted clusters. - # To distinguish between clusters intended for deployment on deployment CI stage and those intended for - # deployment on the Test stage, we pass the names of the clusters to be deployed to the - # HostedClients().do_deploy() method. - hosted_clients_obj = HostedClients() - deployed_hosted_cluster_objects = hosted_clients_obj.do_deploy(cluster_names) - deployed_clusters = [obj.name for obj in deployed_hosted_cluster_objects] - - for cluster_name in deployed_clusters: - - client_conf_default_dir = os.path.join( - FUSION_CONF_DIR, f"hypershift_client_bm_{nodepool_replicas}w.yaml" - ) - if not os.path.exists(client_conf_default_dir): - raise FileNotFoundError(f"File {client_conf_default_dir} not found") - with open(client_conf_default_dir) as file_stream: - def_client_config_dict = { - k: (v if v is not None else {}) - for (k, v) in yaml.safe_load(file_stream).items() - } - def_client_config_dict.get("ENV_DATA").update( - {"cluster_name": cluster_name} - ) - kubeconfig_path = hosted_clients_obj.get_kubeconfig_path(cluster_name) - logger.info(f"Kubeconfig path: {kubeconfig_path}") - def_client_config_dict.get("RUN").update( - {"kubeconfig": kubeconfig_path} - ) - cluster_config = Config() - cluster_config.update(def_client_config_dict) - - logger.debug( - "Inserting new hosted cluster config to Multicluster Config " - f"\n{json.dumps(vars(cluster_config), indent=4, cls=CustomJSONEncoder)}" - ) - config.insert_cluster_config(config.nclusters, cluster_config) - - return factory - - -@pytest.fixture() -def destroy_hosted_cluster(): - def factory(cluster_name): - config.switch_to_provider() - logger.info("Destroying hosted cluster. 
OCS related leftovers are expected") - hypershift_base_obj = HyperShiftBase() - - if not hypershift_base_obj.hcp_binary_exists(): - hypershift_base_obj.update_hcp_binary() - - destroy_res = HyperShiftBase().destroy_kubevirt_cluster(cluster_name) - - if destroy_res: - logger.info("Removing cluster from Multicluster Config") - config.remove_cluster_by_name(cluster_name) - - return destroy_res - - return factory diff --git a/tests/libtest/test_provider_create_hosted_cluster.py b/tests/libtest/test_provider_create_hosted_cluster.py index 42509600196..2f502cbde39 100644 --- a/tests/libtest/test_provider_create_hosted_cluster.py +++ b/tests/libtest/test_provider_create_hosted_cluster.py @@ -23,10 +23,6 @@ from ocs_ci.utility.utils import get_latest_release_version from ocs_ci.utility.version import get_ocs_version_from_csv from ocs_ci.framework import config as ocsci_config -from tests.cross_functional.conftest import ( - create_hypershift_clusters, - destroy_hosted_cluster, -) from ocs_ci.ocs import constants from ocs_ci.ocs.resources.storage_client import StorageClient from ocs_ci.helpers.helpers import ( From 70e9759ea8d08817c24f179ee9b1480f8c8a9394 Mon Sep 17 00:00:00 2001 From: Daniel Osypenko Date: Mon, 17 Jun 2024 19:46:44 +0300 Subject: [PATCH 21/29] 0.24 Signed-off-by: Daniel Osypenko --- ocs_ci/deployment/hosted_cluster.py | 4 +++- tests/conftest.py | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/ocs_ci/deployment/hosted_cluster.py b/ocs_ci/deployment/hosted_cluster.py index 13909ac326e..f7652d422fb 100644 --- a/ocs_ci/deployment/hosted_cluster.py +++ b/ocs_ci/deployment/hosted_cluster.py @@ -326,10 +326,12 @@ def get_kubeconfig_path(self, cluster_name): Returns: str: Path to the kubeconfig file """ + if not self.kubeconfig_paths: + self.download_hosted_clusters_kubeconfig_files() for kubeconfig_path in self.kubeconfig_paths: if cluster_name in kubeconfig_path: return kubeconfig_path - return None + return def deploy_multiple_odf_clients(self): """ diff --git a/tests/conftest.py b/tests/conftest.py index d3c1d1e9879..6c174be3aa8 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -8024,7 +8024,7 @@ def factory( ) kubeconfig_path = hosted_clients_obj.get_kubeconfig_path(cluster_name) log.info(f"Kubeconfig path: {kubeconfig_path}") - def_client_config_dict.get("RUN").update( + def_client_config_dict.setdefault("RUN", {}).update( {"kubeconfig": kubeconfig_path} ) cluster_config = Config() From c4464affb4e73a902d48f2d67c824190eafe8425 Mon Sep 17 00:00:00 2001 From: Daniel Osypenko Date: Mon, 1 Jul 2024 11:46:45 +0300 Subject: [PATCH 22/29] added create-destroy libtest Signed-off-by: Daniel Osypenko --- .../test_provider_create_hosted_cluster.py | 33 +++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/tests/libtest/test_provider_create_hosted_cluster.py b/tests/libtest/test_provider_create_hosted_cluster.py index 2f502cbde39..8a9e943eea8 100644 --- a/tests/libtest/test_provider_create_hosted_cluster.py +++ b/tests/libtest/test_provider_create_hosted_cluster.py @@ -171,6 +171,39 @@ def test_create_hosted_cluster_with_fixture( cluster_name in server ), f"Failed to switch to cluster '{cluster_name}' and fetch data" + @runs_on_provider + @hci_provider_required + def test_create_destroy_hosted_cluster_with_fixture( + self, create_hypershift_clusters, destroy_hosted_cluster + ): + """ + Test create hosted cluster with fixture + """ + log_step("Create hosted client") + cluster_name = get_random_cluster_name() + odf_version = 
str(get_ocs_version_from_csv()).replace(".stable", "") + ocp_version = get_latest_release_version() + nodepool_replicas = 2 + + create_hypershift_clusters( + cluster_names=[cluster_name], + ocp_version=ocp_version, + odf_version=odf_version, + setup_storage_client=True, + nodepool_replicas=nodepool_replicas, + ) + + log_step("Switch to the hosted cluster") + ocsci_config.switch_to_cluster_by_name(cluster_name) + + server = str( + OCP().exec_oc_cmd("oc whoami --show-server", out_yaml_format=False) + ) + + assert ( + cluster_name in server + ), f"Failed to switch to cluster '{cluster_name}' and fetch data" + log_step("Destroy hosted cluster") assert destroy_hosted_cluster(cluster_name), "Failed to destroy hosted cluster" From 42e20e4d0573b30f4d6d3177bf86d4776e9c6c42 Mon Sep 17 00:00:00 2001 From: Daniel Osypenko Date: Sun, 7 Jul 2024 19:52:12 +0300 Subject: [PATCH 23/29] get odf version from catsrc Signed-off-by: Daniel Osypenko --- ocs_ci/deployment/helpers/hypershift_base.py | 9 +++++++-- ocs_ci/utility/utils.py | 17 +++++++++++++++++ .../test_provider_create_hosted_cluster.py | 16 ++++++++++++---- 3 files changed, 36 insertions(+), 6 deletions(-) diff --git a/ocs_ci/deployment/helpers/hypershift_base.py b/ocs_ci/deployment/helpers/hypershift_base.py index 801066cbe16..8a0a327e7b2 100644 --- a/ocs_ci/deployment/helpers/hypershift_base.py +++ b/ocs_ci/deployment/helpers/hypershift_base.py @@ -65,7 +65,7 @@ def wrapper(self, *args, **kwargs): return wrapper -def get_random_cluster_name(): +def get_random_hosted_cluster_name(): """ Get a random cluster name @@ -82,7 +82,12 @@ def get_random_cluster_name(): random.choice(string.ascii_lowercase) for _ in range(3) ) cluster_name = ( - "hcp" + hcp_version + "-" + bm_name[match.start() :] + random_letters + "hcp" + + hcp_version + + "-bm" + + bm_name[match.start() :] + + "-" + + random_letters ) else: raise ValueError("Cluster name not found in the env data") diff --git a/ocs_ci/utility/utils.py b/ocs_ci/utility/utils.py index 4e87916fb3b..cadf5074164 100644 --- a/ocs_ci/utility/utils.py +++ b/ocs_ci/utility/utils.py @@ -58,6 +58,7 @@ NoRunningCephToolBoxException, ClusterNotInSTSModeException, ) +from ocs_ci.ocs.ocp import OCP from ocs_ci.utility import version as version_module from ocs_ci.utility.flexy import load_cluster_info @@ -5052,3 +5053,19 @@ def default(self, obj): if isinstance(obj, set): return list(obj) return super().default(obj) + + +def get_odf_tag_from_redhat_catsrc(): + """ + Get the ODF tag from the default redhat-operators Catalog Source + + Returns: + str: ODF tag from redhat-operators Catalog Source + """ + catsrc_data = OCP( + kind=constants.CATSRC, + namespace=constants.MARKETPLACE_NAMESPACE, + resource_name="redhat-operators", + ).get() + regestry_image = catsrc_data.get("spec").get("image") + return regestry_image.split(":")[-1] diff --git a/tests/libtest/test_provider_create_hosted_cluster.py b/tests/libtest/test_provider_create_hosted_cluster.py index 8a9e943eea8..5acd4aa1857 100644 --- a/tests/libtest/test_provider_create_hosted_cluster.py +++ b/tests/libtest/test_provider_create_hosted_cluster.py @@ -4,7 +4,7 @@ from ocs_ci.deployment.deployment import validate_acm_hub_install, Deployment from ocs_ci.deployment.helpers.hypershift_base import ( get_hosted_cluster_names, - get_random_cluster_name, + get_random_hosted_cluster_name, ) from ocs_ci.deployment.hosted_cluster import ( HypershiftHostedOCP, @@ -20,7 +20,10 @@ runs_on_provider, ) from ocs_ci.ocs.ocp import OCP -from ocs_ci.utility.utils import 
get_latest_release_version +from ocs_ci.utility.utils import ( + get_latest_release_version, + get_odf_tag_from_redhat_catsrc, +) from ocs_ci.utility.version import get_ocs_version_from_csv from ocs_ci.framework import config as ocsci_config from ocs_ci.ocs import constants @@ -147,8 +150,10 @@ def test_create_hosted_cluster_with_fixture( Test create hosted cluster with fixture """ log_step("Create hosted client") - cluster_name = get_random_cluster_name() + cluster_name = get_random_hosted_cluster_name() odf_version = str(get_ocs_version_from_csv()).replace(".stable", "") + if "rhodf" in odf_version: + odf_version = get_odf_tag_from_redhat_catsrc() ocp_version = get_latest_release_version() nodepool_replicas = 2 @@ -180,8 +185,11 @@ def test_create_destroy_hosted_cluster_with_fixture( Test create hosted cluster with fixture """ log_step("Create hosted client") - cluster_name = get_random_cluster_name() + cluster_name = get_random_hosted_cluster_name() odf_version = str(get_ocs_version_from_csv()).replace(".stable", "") + if "rhodf" in odf_version: + odf_version = get_odf_tag_from_redhat_catsrc() + ocp_version = get_latest_release_version() nodepool_replicas = 2 From 2fc252cff000f11e5f5a62d60747fad1c32a1da7 Mon Sep 17 00:00:00 2001 From: Daniel Osypenko Date: Sun, 7 Jul 2024 20:06:02 +0300 Subject: [PATCH 24/29] 0.25 Signed-off-by: Daniel Osypenko --- ocs_ci/utility/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ocs_ci/utility/utils.py b/ocs_ci/utility/utils.py index cadf5074164..ea2ccbf99a8 100644 --- a/ocs_ci/utility/utils.py +++ b/ocs_ci/utility/utils.py @@ -58,8 +58,6 @@ NoRunningCephToolBoxException, ClusterNotInSTSModeException, ) -from ocs_ci.ocs.ocp import OCP - from ocs_ci.utility import version as version_module from ocs_ci.utility.flexy import load_cluster_info from ocs_ci.utility.retry import retry @@ -5062,6 +5060,8 @@ def get_odf_tag_from_redhat_catsrc(): Returns: str: ODF tag from redhat-operators Catalog Source """ + from ocs_ci.ocs.ocp import OCP + catsrc_data = OCP( kind=constants.CATSRC, namespace=constants.MARKETPLACE_NAMESPACE, From 086b5a70e6003542d78593c11043cfaf2dc5030b Mon Sep 17 00:00:00 2001 From: Daniel Osypenko Date: Sun, 7 Jul 2024 20:45:11 +0300 Subject: [PATCH 25/29] 0.26 Signed-off-by: Daniel Osypenko --- tests/libtest/test_provider_create_hosted_cluster.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/tests/libtest/test_provider_create_hosted_cluster.py b/tests/libtest/test_provider_create_hosted_cluster.py index 5acd4aa1857..0c93f681ce1 100644 --- a/tests/libtest/test_provider_create_hosted_cluster.py +++ b/tests/libtest/test_provider_create_hosted_cluster.py @@ -168,9 +168,7 @@ def test_create_hosted_cluster_with_fixture( log_step("Switch to the hosted cluster") ocsci_config.switch_to_cluster_by_name(cluster_name) - server = str( - OCP().exec_oc_cmd("oc whoami --show-server", out_yaml_format=False) - ) + server = str(OCP().exec_oc_cmd("whoami --show-server", out_yaml_format=False)) assert ( cluster_name in server @@ -204,9 +202,7 @@ def test_create_destroy_hosted_cluster_with_fixture( log_step("Switch to the hosted cluster") ocsci_config.switch_to_cluster_by_name(cluster_name) - server = str( - OCP().exec_oc_cmd("oc whoami --show-server", out_yaml_format=False) - ) + server = str(OCP().exec_oc_cmd("whoami --show-server", out_yaml_format=False)) assert ( cluster_name in server From 675ab0af293535511d37547764a6dba161e1ec2d Mon Sep 17 00:00:00 2001 From: Daniel Osypenko Date: Wed, 24 Jul 2024 
11:26:50 +0300 Subject: [PATCH 26/29] review fixes Signed-off-by: Daniel Osypenko --- ocs_ci/deployment/helpers/hypershift_base.py | 2 +- ocs_ci/deployment/hosted_cluster.py | 3 ++ ocs_ci/ocs/resources/catalog_source.py | 19 ++++++++++++ ocs_ci/utility/json.py | 19 ++++++++++++ ocs_ci/utility/utils.py | 29 ------------------- tests/conftest.py | 8 ++--- .../test_provider_create_hosted_cluster.py | 2 +- 7 files changed, 46 insertions(+), 36 deletions(-) create mode 100644 ocs_ci/utility/json.py diff --git a/ocs_ci/deployment/helpers/hypershift_base.py b/ocs_ci/deployment/helpers/hypershift_base.py index 8a0a327e7b2..40bc677d75f 100644 --- a/ocs_ci/deployment/helpers/hypershift_base.py +++ b/ocs_ci/deployment/helpers/hypershift_base.py @@ -72,7 +72,7 @@ def get_random_hosted_cluster_name(): Returns: str: random cluster name """ - # getting the cluster name from the env data, fo instance "ibm_cloud_baremetal3; mandatory conf field" + # getting the cluster name from the env data, for instance "ibm_cloud_baremetal3; mandatory conf field" bm_name = config.ENV_DATA.get("baremetal").get("env_name") ocp_version = get_latest_release_version() hcp_version = "".join([c for c in ocp_version if c.isdigit()][:3]) diff --git a/ocs_ci/deployment/hosted_cluster.py b/ocs_ci/deployment/hosted_cluster.py index f7652d422fb..da4bfcae15a 100644 --- a/ocs_ci/deployment/hosted_cluster.py +++ b/ocs_ci/deployment/hosted_cluster.py @@ -202,6 +202,9 @@ def storage_installation_requested(self, cluster_name): Args: cluster_name: str: Name of the cluster + + Returns: + bool: True if the storage client installation was requested, False otherwise """ return ( config.ENV_DATA.get("clusters", {}) diff --git a/ocs_ci/ocs/resources/catalog_source.py b/ocs_ci/ocs/resources/catalog_source.py index aedcd36b2ae..c27ef8ae7ea 100644 --- a/ocs_ci/ocs/resources/catalog_source.py +++ b/ocs_ci/ocs/resources/catalog_source.py @@ -1,6 +1,7 @@ """ CatalogSource related functionalities """ + import logging from time import sleep @@ -180,3 +181,21 @@ def enable_specific_source(source_name): ) logger.info(f"Waiting 20 seconds after enabling source: {source_name}") sleep(20) + + +def get_odf_tag_from_redhat_catsrc(): + """ + Get the ODF tag from the default redhat-operators Catalog Source + + Returns: + str: ODF tag from redhat-operators Catalog Source + """ + from ocs_ci.ocs.ocp import OCP + + catsrc_data = OCP( + kind=constants.CATSRC, + namespace=constants.MARKETPLACE_NAMESPACE, + resource_name="redhat-operators", + ).get() + regestry_image = catsrc_data.get("spec").get("image") + return regestry_image.split(":")[-1] diff --git a/ocs_ci/utility/json.py b/ocs_ci/utility/json.py new file mode 100644 index 00000000000..569635f04fc --- /dev/null +++ b/ocs_ci/utility/json.py @@ -0,0 +1,19 @@ +import json + + +class SetToListJSONEncoder(json.JSONEncoder): + """ + The CustomJSONEncoder class is a subclass of json.JSONEncoder designed to handle the serialization of Python + objects into JSON format, with a specific focus on converting set objects into lists. + This is necessary because the default JSON encoder in Python does not support set objects, which are not a valid + JSON data type. 
+ This way we avoid "TypeError: Object of type set is not JSON serializable" + + Usage: + json.dumps(data, cls=SetToListJSONEncoder) + """ + + def default(self, obj): + if isinstance(obj, set): + return list(obj) + return super().default(obj) diff --git a/ocs_ci/utility/utils.py b/ocs_ci/utility/utils.py index ea2ccbf99a8..781c0acc0ff 100644 --- a/ocs_ci/utility/utils.py +++ b/ocs_ci/utility/utils.py @@ -5040,32 +5040,3 @@ def sum_of_two_storage_sizes(storage_size1, storage_size2, convert_size=1024): size = size1 + size2 new_storage_size = f"{size}{unit}" return new_storage_size - - -class CustomJSONEncoder(json.JSONEncoder): - """ - Custom JSON encoder to handle set objects - """ - - def default(self, obj): - if isinstance(obj, set): - return list(obj) - return super().default(obj) - - -def get_odf_tag_from_redhat_catsrc(): - """ - Get the ODF tag from the default redhat-operators Catalog Source - - Returns: - str: ODF tag from redhat-operators Catalog Source - """ - from ocs_ci.ocs.ocp import OCP - - catsrc_data = OCP( - kind=constants.CATSRC, - namespace=constants.MARKETPLACE_NAMESPACE, - resource_name="redhat-operators", - ).get() - regestry_image = catsrc_data.get("spec").get("image") - return regestry_image.split(":")[-1] diff --git a/tests/conftest.py b/tests/conftest.py index 6c174be3aa8..32fa2eb7031 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -124,6 +124,7 @@ get_status_before_execution, get_status_after_execution, ) +from ocs_ci.utility.json import SetToListJSONEncoder from ocs_ci.utility.resource_check import ( create_resource_dct, get_environment_status_after_execution, @@ -151,7 +152,6 @@ skipif_ui_not_support, run_cmd, ceph_health_check_multi_storagecluster_external, - CustomJSONEncoder, ) from ocs_ci.helpers import helpers, dr_helpers from ocs_ci.helpers.helpers import ( @@ -7556,7 +7556,6 @@ def finalizer(): @pytest.fixture(scope="session") def scale_noobaa_resources_session(request): - """ Session scoped fixture to scale noobaa resources @@ -7574,7 +7573,6 @@ def scale_noobaa_resources_fixture(request): def scale_noobaa_resources(request): - """ Scale the noobaa pod resources and scale endpoint count @@ -7994,7 +7992,7 @@ def factory( log.info( "Creating a hosted clusters with following deployment config: \n%s", json.dumps( - hosted_cluster_conf_on_provider, indent=4, cls=CustomJSONEncoder + hosted_cluster_conf_on_provider, indent=4, cls=SetToListJSONEncoder ), ) ocsci_config.update(hosted_cluster_conf_on_provider) @@ -8032,7 +8030,7 @@ def factory( log.debug( "Inserting new hosted cluster config to Multicluster Config " - f"\n{json.dumps(vars(cluster_config), indent=4, cls=CustomJSONEncoder)}" + f"\n{json.dumps(vars(cluster_config), indent=4, cls=SetToListJSONEncoder)}" ) ocsci_config.insert_cluster_config( ocsci_config.nclusters, cluster_config diff --git a/tests/libtest/test_provider_create_hosted_cluster.py b/tests/libtest/test_provider_create_hosted_cluster.py index 0c93f681ce1..eae9093abe4 100644 --- a/tests/libtest/test_provider_create_hosted_cluster.py +++ b/tests/libtest/test_provider_create_hosted_cluster.py @@ -20,9 +20,9 @@ runs_on_provider, ) from ocs_ci.ocs.ocp import OCP +from ocs_ci.ocs.resources.catalog_source import get_odf_tag_from_redhat_catsrc from ocs_ci.utility.utils import ( get_latest_release_version, - get_odf_tag_from_redhat_catsrc, ) from ocs_ci.utility.version import get_ocs_version_from_csv from ocs_ci.framework import config as ocsci_config From 4d121c6a8b5cb641312814c958e6445cc227886f Mon Sep 17 00:00:00 2001 From: Daniel 
Osypenko
Date: Mon, 29 Jul 2024 16:59:09 +0300
Subject: [PATCH 27/29] review fixes

Signed-off-by: Daniel Osypenko
---
 tests/libtest/test_provider_create_hosted_cluster.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/tests/libtest/test_provider_create_hosted_cluster.py b/tests/libtest/test_provider_create_hosted_cluster.py
index eae9093abe4..7e9c564cb09 100644
--- a/tests/libtest/test_provider_create_hosted_cluster.py
+++ b/tests/libtest/test_provider_create_hosted_cluster.py
@@ -180,7 +180,8 @@ def test_create_destroy_hosted_cluster_with_fixture(
         self, create_hypershift_clusters, destroy_hosted_cluster
     ):
         """
-        Test create hosted cluster with fixture
+        Test create hosted cluster with fixture and destroy cluster abruptly
+        Note that Ceph resources associated with the cluster will not be cleaned up
         """
         log_step("Create hosted client")
         cluster_name = get_random_hosted_cluster_name()

From 0a1551499b9de4cf9271da0e7b18848b369ffecd Mon Sep 17 00:00:00 2001
From: Daniel Osypenko
Date: Mon, 12 Aug 2024 13:21:28 +0300
Subject: [PATCH 28/29] review fixes from dhorak

Signed-off-by: Daniel Osypenko
---
 ocs_ci/deployment/helpers/hypershift_base.py | 2 +-
 ocs_ci/ocs/resources/catalog_source.py       | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/ocs_ci/deployment/helpers/hypershift_base.py b/ocs_ci/deployment/helpers/hypershift_base.py
index 40bc677d75f..7f21fad8654 100644
--- a/ocs_ci/deployment/helpers/hypershift_base.py
+++ b/ocs_ci/deployment/helpers/hypershift_base.py
@@ -73,7 +73,7 @@ def get_random_hosted_cluster_name():
         str: random cluster name
     """
     # getting the cluster name from the env data, for instance "ibm_cloud_baremetal3; mandatory conf field"
-    bm_name = config.ENV_DATA.get("baremetal").get("env_name")
+    bm_name = config.ENV_DATA.get("baremetal", {}).get("env_name")
     ocp_version = get_latest_release_version()
     hcp_version = "".join([c for c in ocp_version if c.isdigit()][:3])
     match = re.search(r"\d+$", bm_name)
diff --git a/ocs_ci/ocs/resources/catalog_source.py b/ocs_ci/ocs/resources/catalog_source.py
index c27ef8ae7ea..b2c1140861f 100644
--- a/ocs_ci/ocs/resources/catalog_source.py
+++ b/ocs_ci/ocs/resources/catalog_source.py
@@ -197,5 +197,5 @@ def get_odf_tag_from_redhat_catsrc():
         namespace=constants.MARKETPLACE_NAMESPACE,
         resource_name="redhat-operators",
     ).get()
-    regestry_image = catsrc_data.get("spec").get("image")
-    return regestry_image.split(":")[-1]
+    registry_image = catsrc_data.get("spec").get("image")
+    return registry_image.split(":")[-1]

From e171dbca62d63e88b027ef88365986a7f6316d71 Mon Sep 17 00:00:00 2001
From: Daniel Osypenko
Date: Mon, 12 Aug 2024 15:38:23 +0300
Subject: [PATCH 29/29] review fixes Petr

Signed-off-by: Daniel Osypenko
---
 ocs_ci/deployment/hosted_cluster.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/ocs_ci/deployment/hosted_cluster.py b/ocs_ci/deployment/hosted_cluster.py
index da4bfcae15a..c5ae57bdc9e 100644
--- a/ocs_ci/deployment/hosted_cluster.py
+++ b/ocs_ci/deployment/hosted_cluster.py
@@ -201,7 +201,7 @@ def storage_installation_requested(self, cluster_name):
         Check if the storage client installation was requested in the config

         Args:
-            cluster_name: str: Name of the cluster
+            cluster_name (str): Name of the cluster

         Returns:
             bool: True if the storage client installation was requested, False otherwise
@@ -217,8 +217,8 @@ def deploy_hosted_ocp_clusters(self, cluster_names_list=None):
         Deploy multiple hosted OCP clusters on Provider platform

         Args:
-            cluster_names_list (list,
optional): List of cluster names to deploy. If not provided, all clusters - in config.ENV_DATA["clusters"] will be deployed. + cluster_names_list (list): List of cluster names to deploy. If not provided, all clusters + in config.ENV_DATA["clusters"] will be deployed (optional argument) Returns: list: The list of cluster names for all hosted OCP clusters deployed by the func successfully @@ -325,7 +325,7 @@ def get_kubeconfig_path(self, cluster_name): Get the kubeconfig path for the cluster Args: - cluster_name: str: Name of the cluster + cluster_name (str): Name of the cluster Returns: str: Path to the kubeconfig file """
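
A minimal usage sketch (not part of the patch series above) of how a test can consume the pieces in their final PATCH 29/29 form: the create_hypershift_clusters and destroy_hosted_cluster factory fixtures from tests/conftest.py, get_random_hosted_cluster_name() from hypershift_base.py, and get_odf_tag_from_redhat_catsrc() from catalog_source.py. It mirrors the libtests added in this series; the test name, the omitted provider markers, and the hard-coded nodepool_replicas=2 are assumptions made for the example.

# Illustrative sketch only, assuming the fixtures and helpers as they stand after PATCH 29/29.
from ocs_ci.deployment.helpers.hypershift_base import get_random_hosted_cluster_name
from ocs_ci.framework import config as ocsci_config
from ocs_ci.framework.logger_helper import log_step
from ocs_ci.ocs.resources.catalog_source import get_odf_tag_from_redhat_catsrc
from ocs_ci.utility.utils import get_latest_release_version
from ocs_ci.utility.version import get_ocs_version_from_csv


def test_hosted_cluster_lifecycle(create_hypershift_clusters, destroy_hosted_cluster):
    # e.g. "hcp416-bm3-xyz" for OCP 4.16.x on the "ibm_cloud_baremetal3" provider
    cluster_name = get_random_hosted_cluster_name()

    # ODF version is taken from the CSV; when it only reports the generic "rhodf"
    # name, fall back to the image tag of the redhat-operators catalog source
    odf_version = str(get_ocs_version_from_csv()).replace(".stable", "")
    if "rhodf" in odf_version:
        odf_version = get_odf_tag_from_redhat_catsrc()

    log_step("Deploy hosted cluster and set up its storage client")
    create_hypershift_clusters(
        cluster_names=[cluster_name],
        ocp_version=get_latest_release_version(),
        odf_version=odf_version,
        setup_storage_client=True,
        nodepool_replicas=2,
    )

    # The factory inserted the new cluster into the multicluster config, so the
    # test can switch into it (and back to the provider) by name
    ocsci_config.switch_to_cluster_by_name(cluster_name)
    # ... exercise the hosted cluster here ...

    log_step("Destroy hosted cluster")
    assert destroy_hosted_cluster(cluster_name), "Failed to destroy hosted cluster"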