Create new test for creating multiple device classes
Signed-off-by: Itzhak Kave <[email protected]>
Itzhak Kave committed Jan 2, 2025
1 parent 4829d0c commit e150cdf
Showing 4 changed files with 201 additions and 0 deletions.
106 changes: 106 additions & 0 deletions ocs_ci/helpers/helpers.py
@@ -5619,3 +5619,109 @@ def apply_custom_taint_and_toleration(taint_label="xyz"):
    )
    for pod_obj in pod_list:
        pod_obj.delete(wait=False)


def create_ceph_block_pool_for_deviceclass(
    device_class,
    pool_name=None,
    namespace=None,
    replica=3,
    failure_domain=None,
    verify=True,
):
    """
    Create a Ceph block pool for a device class

    Args:
        device_class (str): The device class name
        pool_name (str): The pool name to create
        namespace (str): The pool namespace
        replica (int): The replica size for a pool
        failure_domain (str): Failure domain name
        verify (bool): True to verify the pool exists after creation. False otherwise

    Returns:
        OCS: The OCS instance for the Ceph block pool

    """
    cbp_data = templating.load_yaml(constants.CEPHBLOCKPOOL_YAML)
    cbp_data["metadata"]["name"] = (
        pool_name if pool_name else create_unique_resource_name("test", "cbp")
    )
    cbp_data["metadata"]["namespace"] = (
        namespace or config.ENV_DATA["cluster_namespace"]
    )
    cbp_data["spec"]["deviceClass"] = device_class
    cbp_data["spec"]["replicated"]["size"] = replica
    cbp_data["spec"]["failureDomain"] = failure_domain or get_failure_domin()

    cbp_obj = create_resource(**cbp_data)
    cbp_obj.reload()

    if verify:
        assert verify_block_pool_exists(
            cbp_obj.name
        ), f"Block pool {cbp_obj.name} does not exist"
    return cbp_obj
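
A minimal usage sketch for this helper; the device-class and pool names below are illustrative, not taken from this commit:

# Hypothetical call: create a replica-3 block pool backed by an "ssd"
# device class; verify=True (the default) asserts the pool exists
cbp_obj = create_ceph_block_pool_for_deviceclass(
    device_class="ssd",
    pool_name="test-ssd-cbp",
)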


def create_lvs_resource(storageclass, worker_nodes=None, min_size=None, max_size=None):
    """
    Create the LocalVolumeSet resource.

    Args:
        storageclass (str): storageClassName value to be used in the
            LocalVolumeSet CR based on LOCAL_VOLUME_SET_YAML
        worker_nodes (list): The worker node names to be used in the LocalVolumeSet resource
        min_size (str): The min size to be used in the LocalVolumeSet resource
        max_size (str): The max size to be used in the LocalVolumeSet resource

    Returns:
        OCS: The OCS instance for the LocalVolumeSet resource

    """
    worker_nodes = worker_nodes or node.get_worker_nodes()

    # Pull local volume set yaml data
    logger.info("Pulling LocalVolumeSet CR data from yaml")
    lvs_data = templating.load_yaml(constants.LOCAL_VOLUME_SET_YAML)

    # Since we don't have a datastore with SSDs on our current VMware machines, the
    # LocalVolumeSet doesn't detect NonRotational disks. As a workaround we add
    # "Rotational" to deviceMechanicalProperties so that HDD disks are detected
    if config.ENV_DATA.get(
        "local_storage_allow_rotational_disks"
    ) or config.ENV_DATA.get("odf_provider_mode_deployment"):
        logger.info(
            "Adding Rotational for deviceMechanicalProperties spec"
            " to detect HDD disk"
        )
        lvs_data["spec"]["deviceInclusionSpec"]["deviceMechanicalProperties"].append(
            "Rotational"
        )

    if min_size:
        lvs_data["spec"]["deviceInclusionSpec"]["minSize"] = min_size
    if max_size:
        lvs_data["spec"]["deviceInclusionSpec"]["maxSize"] = max_size

    # Update the LocalVolumeSet data with the worker node names
    logger.info(
        "Updating LocalVolumeSet CR data with worker node names: %s", worker_nodes
    )
    lvs_data["spec"]["nodeSelector"]["nodeSelectorTerms"][0]["matchExpressions"][0][
        "values"
    ] = worker_nodes

    # Set the storage class
    logger.info(
        "Updating LocalVolumeSet CR data with LSO storageclass: %s", storageclass
    )
    lvs_data["spec"]["storageClassName"] = storageclass

    # Set volumeMode to Filesystem for MCG-only deployment
    if config.ENV_DATA["mcg_only_deployment"]:
        lvs_data["spec"]["volumeMode"] = constants.VOLUME_MODE_FILESYSTEM

    lvs_obj = create_resource(**lvs_data)
    lvs_obj.reload()
    return lvs_obj
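
A hedged usage sketch; the storage class name and size bounds are illustrative, and worker_nodes falls back to all worker nodes when omitted:

# Hypothetical call: create an LVS that claims only disks sized
# between 100Gi and 150Gi on all worker nodes
lvs_obj = create_lvs_resource(
    storageclass="ssd-new",
    min_size="100Gi",
    max_size="150Gi",
)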
70 changes: 70 additions & 0 deletions ocs_ci/helpers/multiple_device_classes.py
@@ -0,0 +1,70 @@
import logging
import random

from ocs_ci.helpers.helpers import create_lvs_resource
from ocs_ci.ocs.node import add_disk_to_node
from ocs_ci.ocs.resources.storage_cluster import get_storage_size
from ocs_ci.utility.utils import sum_of_two_storage_sizes

from ocs_ci.ocs import constants, defaults
from ocs_ci.ocs.ocp import OCP


log = logging.getLogger(__name__)


def create_new_lvs_for_new_deviceclass(
    worker_nodes, create_disks_for_lvs=True, ssd=True
):
    """
    Create a new LocalVolumeSet resource for a new device class

    Args:
        worker_nodes (list): The worker node names to be used in the LocalVolumeSet resource.
        create_disks_for_lvs (bool): If True, create new disks for the new LocalVolumeSet resource.
        ssd (bool): If True, mark the disks as SSD

    Returns:
        OCS: The OCS instance for the LocalVolumeSet resource

    """
    osd_size = get_storage_size()
    log.info(f"The osd size is {osd_size}")
    old_lvs_max_size = sum_of_two_storage_sizes(osd_size, "30Gi")
    ocp_lvs_obj = OCP(
        kind=constants.LOCAL_VOLUME_SET,
        namespace=defaults.LOCAL_STORAGE_NAMESPACE,
        resource_name=constants.LOCAL_BLOCK_RESOURCE,
    )
    log.info(
        f"Update the old LocalVolumeSet {ocp_lvs_obj.resource_name} with the maxSize "
        f"{old_lvs_max_size} so it will not consume the new PVs"
    )
    params = (
        f'{{"spec": {{"deviceInclusionSpec": {{"maxSize": "{old_lvs_max_size}"}}}}}}'
    )
    lvs_result = ocp_lvs_obj.patch(params=params, format_type="json")
    assert (
        lvs_result
    ), f"Failed to update the LocalVolumeSet {ocp_lvs_obj.resource_name}"

    log.info(
        "Set the minSize of the new LVS higher than the maxSize of the old LVS, "
        "so that the new LVS will consume the disks with the new size"
    )
    min_size = sum_of_two_storage_sizes(old_lvs_max_size, "10Gi")
    log.info(
        "Limit the max size of the new LVS, so it will consume only the newly added disks"
    )
    max_size = sum_of_two_storage_sizes(old_lvs_max_size, "60Gi")
    suffix = "".join(random.choices("0123456789", k=5))
    sc_name = f"ssd{suffix}"
    lvs_obj = create_lvs_resource(sc_name, worker_nodes, min_size, max_size)

    if create_disks_for_lvs:
        disk_size_in_gb = sum_of_two_storage_sizes(old_lvs_max_size, "20Gi")
        disk_size = int(disk_size_in_gb[:-2])
        for n in worker_nodes:
            add_disk_to_node(n, disk_size=disk_size, ssd=ssd)

    return lvs_obj
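
A hedged sketch of how this helper could feed into create_ceph_block_pool_for_deviceclass once the new OSDs come up; that the new device class takes the name of the generated storage class is an assumption here, not something this commit establishes:

# Hypothetical follow-up (not part of this commit): build a block pool on
# the new device class, assuming it is named after the generated storage class
from ocs_ci.helpers.helpers import create_ceph_block_pool_for_deviceclass
from ocs_ci.ocs.node import get_worker_nodes

lvs_obj = create_new_lvs_for_new_deviceclass(get_worker_nodes())
new_device_class = lvs_obj.data["spec"]["storageClassName"]  # assumed mapping
cbp_obj = create_ceph_block_pool_for_deviceclass(new_device_class)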
3 changes: 3 additions & 0 deletions ocs_ci/ocs/constants.py
@@ -644,6 +644,9 @@
CEPHFILESYSTEM_YAML = os.path.join(TEMPLATE_CSI_FS_DIR, "CephFileSystem.yaml")

CEPHBLOCKPOOL_YAML = os.path.join(TEMPLATE_DEPLOYMENT_DIR, "cephblockpool.yaml")
DEVICECLASS_CEPHBLOCKPOOL_YAML = os.path.join(
    TEMPLATE_DEPLOYMENT_DIR, "deviceclass-cephblockpool.yaml"
)
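
DEVICECLASS_CEPHBLOCKPOOL_YAML is defined here but not consumed elsewhere in this commit; a hedged sketch of how it would presumably be loaded, mirroring the CEPHBLOCKPOOL_YAML handling in create_ceph_block_pool_for_deviceclass:

# Hypothetical usage (not part of this commit): load the device-class
# block pool template and point it at a device class
from ocs_ci.ocs import constants
from ocs_ci.utility import templating

cbp_data = templating.load_yaml(constants.DEVICECLASS_CEPHBLOCKPOOL_YAML)
cbp_data["spec"]["deviceClass"] = "ssd"  # assumed device-class name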

VSPHERE_THICK_STORAGECLASS_YAML = os.path.join(
    TEMPLATE_DEPLOYMENT_DIR, "vsphere_storageclass_thick.yaml"
22 changes: 22 additions & 0 deletions
@@ -0,0 +1,22 @@
import logging

from ocs_ci.framework.testlib import (
    ManageTest,
    ignore_leftovers,
    tier1,
    brown_squad,
)
from ocs_ci.ocs.node import get_osd_running_nodes
from ocs_ci.helpers.multiple_device_classes import create_new_lvs_for_new_deviceclass

log = logging.getLogger(__name__)


@brown_squad
@tier1
@ignore_leftovers
class TestMultipleDeviceClasses(ManageTest):
    def test_add_new_ssd_device_class(self):
        osd_node_names = get_osd_running_nodes()
        log.info(f"osd node names = {osd_node_names}")
        create_new_lvs_for_new_deviceclass(osd_node_names)
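
The test currently stops after creating the LocalVolumeSet; a hedged sketch of an extra assertion it could make on the returned object, not part of this commit:

    # Hypothetical extension of the test body: check that the new LVS
    # carries the generated ssd-prefixed storage class name
    lvs_obj = create_new_lvs_for_new_deviceclass(osd_node_names)
    sc_name = lvs_obj.data["spec"]["storageClassName"]
    assert sc_name.startswith("ssd"), f"Unexpected storage class name: {sc_name}"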
