Skip to content

Commit

Permalink
Updated deployment method to label worker nodes as storage nodes…
Browse files Browse the repository at this point in the history
… if master nodes are not marked schedulable

Signed-off-by: Amrita Mahapatra <[email protected]>
  • Loading branch information
amr1ta committed Jul 23, 2024
1 parent 54c8e06 commit 03350ab
Show file tree
Hide file tree
Showing 2 changed files with 21 additions and 8 deletions.
8 changes: 8 additions & 0 deletions ocs_ci/deployment/baremetal.py
Original file line number Diff line number Diff line change
Expand Up @@ -1270,6 +1270,9 @@ def disks_available_to_cleanup(worker, namespace=constants.DEFAULT_NAMESPACE):
)
disk_to_ignore_cleanup_raw = json.loads(str(out))
disk_available_for_cleanup_json = disk_to_ignore_cleanup_raw["blockdevices"]
logger.info(
f"The disks avialble for cleanup json: {disk_available_for_cleanup_json}"
)

for disk_to_ignore_cleanup in disk_available_for_cleanup_json:
if disk_to_ignore_cleanup["mountpoint"] == "/boot":
Expand All @@ -1284,6 +1287,11 @@ def disks_available_to_cleanup(worker, namespace=constants.DEFAULT_NAMESPACE):
f"Ignorning disk {disk_to_ignore_cleanup['kname']} for cleanup because it's a rom disk "
)
disk_available_for_cleanup_json.remove(str(disk_to_ignore_cleanup["kname"]))
elif "nbd" in disk_to_ignore_cleanup["kname"]:
logger.info(
f"Ignorning disk {disk_to_ignore_cleanup['kname']} for cleanup because it's a rom disk "
)
disk_available_for_cleanup_json.remove(str(disk_to_ignore_cleanup["kname"]))
return logger.info(
f"no of disks avaliable to cleanup {len(disk_available_for_cleanup_json)}"
)
Expand Down
21 changes: 13 additions & 8 deletions ocs_ci/deployment/provider_client/storage_client_deployment.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,12 +97,8 @@ def provider_and_native_client_installation(
7. Create storage profile
"""

# Allow ODF to be deployed on all nodes
nodes = get_all_nodes()
node_objs = get_node_objs(nodes)

log.info("labeling storage nodes")
label_nodes(nodes=node_objs, label=constants.OPERATOR_NODE_LABEL)
worker_node_objs = get_nodes(node_type=constants.WORKER_MACHINE)
no_of_worker_nodes = len(worker_node_objs)

# Allow hosting cluster domain to be usable by hosted clusters
path = "/spec/routeAdmission"
Expand Down Expand Up @@ -132,13 +128,22 @@ def provider_and_native_client_installation(
self.scheduler_obj.patch(params=params, format_type="json"), (
"Failed to run patch command to update control nodes as scheduleable"
)
# Allow ODF to be deployed on all nodes
nodes = get_all_nodes()
node_objs = get_node_objs(nodes)

worker_node_objs = get_nodes(node_type=constants.WORKER_MACHINE)
no_of_worker_nodes = len(worker_node_objs)
log.info("labeling all nodes as storage nodes")
label_nodes(nodes=node_objs, label=constants.OPERATOR_NODE_LABEL)
else:
log.info("labeling worker nodes as storage nodes")
label_nodes(nodes=worker_node_objs, label=constants.OPERATOR_NODE_LABEL)

no_of_disks_available_on_worker_nodes = disks_available_to_cleanup(
worker_node_objs[0]
)
log.info(
f"no of disks avilable for cleanup, {no_of_disks_available_on_worker_nodes}"
)

# Install LSO, create LocalVolumeDiscovery and LocalVolumeSet
is_local_storage_available = self.sc_obj.is_exist(
Expand Down

0 comments on commit 03350ab

Please sign in to comment.