From 03350ab1f7fd364622b065dd0e97e55210538d4f Mon Sep 17 00:00:00 2001
From: Amrita Mahapatra <49347640+amr1ta@users.noreply.github.com>
Date: Tue, 23 Jul 2024 13:30:01 +0530
Subject: [PATCH] Update deployment method to label worker nodes as storage
 nodes if master nodes are not marked schedulable

Signed-off-by: Amrita Mahapatra <49347640+amr1ta@users.noreply.github.com>
---
 ocs_ci/deployment/baremetal.py       |  8 +++++++
 .../storage_client_deployment.py     | 21 ++++++++++++-------
 2 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/ocs_ci/deployment/baremetal.py b/ocs_ci/deployment/baremetal.py
index 3784a8ec9fe..4a3df71577e 100644
--- a/ocs_ci/deployment/baremetal.py
+++ b/ocs_ci/deployment/baremetal.py
@@ -1270,6 +1270,9 @@ def disks_available_to_cleanup(worker, namespace=constants.DEFAULT_NAMESPACE):
     )
     disk_to_ignore_cleanup_raw = json.loads(str(out))
     disk_available_for_cleanup_json = disk_to_ignore_cleanup_raw["blockdevices"]
+    logger.info(
+        f"The disks available for cleanup json: {disk_available_for_cleanup_json}"
+    )
 
     for disk_to_ignore_cleanup in disk_available_for_cleanup_json:
         if disk_to_ignore_cleanup["mountpoint"] == "/boot":
@@ -1284,6 +1287,11 @@
             f"Ignorning disk {disk_to_ignore_cleanup['kname']} for cleanup because it's a rom disk "
         )
         disk_available_for_cleanup_json.remove(str(disk_to_ignore_cleanup["kname"]))
+        elif "nbd" in disk_to_ignore_cleanup["kname"]:
+            logger.info(
+                f"Ignoring disk {disk_to_ignore_cleanup['kname']} for cleanup because it's an nbd disk"
+            )
+            disk_available_for_cleanup_json.remove(disk_to_ignore_cleanup)
     return logger.info(
         f"no of disks avaliable to cleanup {len(disk_available_for_cleanup_json)}"
     )
diff --git a/ocs_ci/deployment/provider_client/storage_client_deployment.py b/ocs_ci/deployment/provider_client/storage_client_deployment.py
index b0e00748cc4..9214fd123ea 100644
--- a/ocs_ci/deployment/provider_client/storage_client_deployment.py
+++ b/ocs_ci/deployment/provider_client/storage_client_deployment.py
@@ -97,12 +97,8 @@ def provider_and_native_client_installation(
         7. Create storage profile
         """

-        # Allow ODF to be deployed on all nodes
-        nodes = get_all_nodes()
-        node_objs = get_node_objs(nodes)
-
-        log.info("labeling storage nodes")
-        label_nodes(nodes=node_objs, label=constants.OPERATOR_NODE_LABEL)
+        worker_node_objs = get_nodes(node_type=constants.WORKER_MACHINE)
+        no_of_worker_nodes = len(worker_node_objs)
 
         # Allow hosting cluster domain to be usable by hosted clusters
         path = "/spec/routeAdmission"
@@ -132,13 +128,22 @@
             self.scheduler_obj.patch(params=params, format_type="json"), (
                 "Failed to run patch command to update control nodes as scheduleable"
             )
+            # Allow ODF to be deployed on all nodes
+            nodes = get_all_nodes()
+            node_objs = get_node_objs(nodes)
 
-        worker_node_objs = get_nodes(node_type=constants.WORKER_MACHINE)
-        no_of_worker_nodes = len(worker_node_objs)
+            log.info("labeling all nodes as storage nodes")
+            label_nodes(nodes=node_objs, label=constants.OPERATOR_NODE_LABEL)
+        else:
+            log.info("labeling worker nodes as storage nodes")
+            label_nodes(nodes=worker_node_objs, label=constants.OPERATOR_NODE_LABEL)
 
         no_of_disks_available_on_worker_nodes = disks_available_to_cleanup(
             worker_node_objs[0]
         )
+        log.info(
+            f"no of disks available for cleanup: {no_of_disks_available_on_worker_nodes}"
+        )
 
         # Install LSO, create LocalVolumeDiscovery and LocalVolumeSet
         is_local_storage_available = self.sc_obj.is_exist(
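
A note on the labeling flow introduced in storage_client_deployment.py: node
labeling now depends on whether the control-plane nodes are schedulable. The
branch that patches mastersSchedulable labels every node as a storage node,
while the else branch labels only the workers. The sketch below restates that
decision path as a standalone function. It is a minimal illustration, not part
of the patch: it reuses the ocs_ci node helpers referenced in the diff, and the
masters_schedulable flag is a hypothetical stand-in for the scheduler check the
real method performs via self.scheduler_obj.

from ocs_ci.ocs import constants
from ocs_ci.ocs.node import get_all_nodes, get_node_objs, get_nodes, label_nodes


def label_storage_nodes(masters_schedulable):
    """Label the nodes that should host ODF storage pods.

    masters_schedulable is a stand-in for the scheduler check done in
    provider_and_native_client_installation(); it is not an ocs_ci API.
    """
    if masters_schedulable:
        # Control-plane nodes accept workloads, so every node is a storage node.
        node_objs = get_node_objs(get_all_nodes())
        label_nodes(nodes=node_objs, label=constants.OPERATOR_NODE_LABEL)
    else:
        # Masters run only control-plane work; label just the worker nodes.
        worker_node_objs = get_nodes(node_type=constants.WORKER_MACHINE)
        label_nodes(nodes=worker_node_objs, label=constants.OPERATOR_NODE_LABEL)

In both branches the same constants.OPERATOR_NODE_LABEL is applied; only the
set of nodes it lands on changes.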