diff --git a/conf/deployment/fusion_hci_pc/client_bm_upi_1az_rhcos_nvme_2w.yaml b/conf/deployment/fusion_hci_pc/client_bm_upi_1az_rhcos_nvme_2w.yaml
index 9867702a59f..fe536705ecf 100644
--- a/conf/deployment/fusion_hci_pc/client_bm_upi_1az_rhcos_nvme_2w.yaml
+++ b/conf/deployment/fusion_hci_pc/client_bm_upi_1az_rhcos_nvme_2w.yaml
@@ -7,5 +7,6 @@ ENV_DATA:
   worker_replicas: 2
   mon_type: 'hostpath'
   osd_type: 'nvme'
+  quota: 'unrestricted'
 REPORTING:
   ocs_must_gather_image: "quay.io/rhceph-dev/ocs-must-gather"
diff --git a/conf/deployment/fusion_hci_pc/client_bm_upi_1az_rhcos_nvme_3w.yaml b/conf/deployment/fusion_hci_pc/client_bm_upi_1az_rhcos_nvme_3w.yaml
index 5ec19d69af8..53de90f7503 100644
--- a/conf/deployment/fusion_hci_pc/client_bm_upi_1az_rhcos_nvme_3w.yaml
+++ b/conf/deployment/fusion_hci_pc/client_bm_upi_1az_rhcos_nvme_3w.yaml
@@ -7,5 +7,6 @@ ENV_DATA:
   worker_replicas: 3
   mon_type: 'hostpath'
   osd_type: 'nvme'
+  quota: 'unrestricted'
 REPORTING:
   ocs_must_gather_image: "quay.io/rhceph-dev/ocs-must-gather"
diff --git a/conf/deployment/fusion_hci_pc/hypershift_client_bm_2w.yaml b/conf/deployment/fusion_hci_pc/hypershift_client_bm_2w.yaml
index 73e942d82b8..f389510a9e0 100644
--- a/conf/deployment/fusion_hci_pc/hypershift_client_bm_2w.yaml
+++ b/conf/deployment/fusion_hci_pc/hypershift_client_bm_2w.yaml
@@ -5,6 +5,7 @@ ENV_DATA:
   worker_replicas: 2
   mon_type: 'hostpath'
   osd_type: 'ssd'
+  quota: 'unrestricted'
 REPORTING:
   ocs_must_gather_image: "quay.io/ocs-dev/ocs-must-gather"
   ocs_must_gather_latest_tag: 'latest'
diff --git a/conf/deployment/fusion_hci_pc/hypershift_client_bm_3w.yaml b/conf/deployment/fusion_hci_pc/hypershift_client_bm_3w.yaml
index 4b41d399e51..604c640c9d1 100644
--- a/conf/deployment/fusion_hci_pc/hypershift_client_bm_3w.yaml
+++ b/conf/deployment/fusion_hci_pc/hypershift_client_bm_3w.yaml
@@ -5,6 +5,7 @@ ENV_DATA:
   worker_replicas: 3
   mon_type: 'hostpath'
   osd_type: 'ssd'
+  quota: 'unrestricted'
 REPORTING:
   ocs_must_gather_image: "quay.io/ocs-dev/ocs-must-gather"
   ocs_must_gather_latest_tag: 'latest'
diff --git a/ocs_ci/framework/__init__.py b/ocs_ci/framework/__init__.py
index be332687753..4ed023303bc 100644
--- a/ocs_ci/framework/__init__.py
+++ b/ocs_ci/framework/__init__.py
@@ -290,6 +290,18 @@ def get_consumer_indexes_list(self):
 
         return consumer_indexes_list
 
+    def get_consumer_with_restricted_quota_index(self):
+        """
+        Get the cluster index of the first consumer cluster
+        that has quota restrictions.
+
+        Returns:
+            int: index of the consumer cluster
+
+        Raises:
+            ClusterNotFoundException: if no consumer has a restricted quota
+
+        """
+        consumer_indexes = self.get_consumer_indexes_list()
+        for index in consumer_indexes:
+            cluster = self.clusters[index]
+            if cluster.ENV_DATA.get("quota", "unrestricted") != "unrestricted":
+                return index
+        raise ClusterNotFoundException("Didn't find any consumer with restricted quota")
+
     def get_cluster_index_by_name(self, cluster_name):
         """
         Get the cluster index by the cluster name
@@ -497,6 +509,21 @@ def __init__(self):
         switch_index = config.cur_index
         super().__init__(switch_index)
 
+class RunWithRestrictedQuotaConsumerConfigContextIfAvailable(RunWithConfigContext):
+    """
+    Context manager that makes sure that a given code block is executed
+    on a consumer with restricted quota.
+    If no such config is available, run with the current config context.
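+
+    Example (a sketch; assumes at least one consumer config defines
+    a restricted quota in its ENV_DATA):
+
+        with config.RunWithRestrictedQuotaConsumerConfigContextIfAvailable():
+            # runs against the restricted-quota consumer, or against
+            # the current cluster if no such consumer is configured
+            quota = config.ENV_DATA["quota"]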
+ """ + + def __init__(self): + try: + switch_index = config.get_consumer_with_resticted_quota_index() + except ClusterNotFoundException: + logger.DEBUG("No consumer with restricted quota found") + switch_index = config.cur_index + super().__init__(switch_index) + class RunWithFirstConsumerConfigContextIfAvailable(RunWithConfigContext): """ Context manager that makes sure that a given code block is executed on First consumer. diff --git a/ocs_ci/ocs/resources/storageconsumer.py b/ocs_ci/ocs/resources/storageconsumer.py index 1cb41836633..9fc4ab98fed 100644 --- a/ocs_ci/ocs/resources/storageconsumer.py +++ b/ocs_ci/ocs/resources/storageconsumer.py @@ -6,6 +6,7 @@ from ocs_ci.framework import config from ocs_ci.ocs import constants, ocp +from ocs_ci.helpers import helpers from ocs_ci.ocs.resources.ocs import OCS from ocs_ci.utility.utils import exec_cmd @@ -120,3 +121,67 @@ def get_heartbeat_cronjob(self): if job["metadata"]["name"].endswith("status-reporter") ][0] return cronjob + + def fill_up_quota_percentage(self, percentage, quota=None): + """ + Create a PVC of such size that the correct percentage of quota is used + + Returns: + PVC object + """ + pvc_name = f"pvc-quota-{percentage}" + if not quota: + quota = config.ENV_DATA["quota"] + quota_value = quota.split(" ")[0] + quota_units = quota.split(" ")[1] + pvc_size_int = quota_value * percentage // 100 + pvc_size = f"{pvc_size_int}{quota_units}" + rbd_storageclass = helpers.default_storage_class(constants.CEPHBLOCKPOOL) + pvc_obj = helpers.create_pvc( + pvc_name=pvc_name, + sc_name=rbd_storageclass, + namespace="default", + size=pvc_size, + do_reload=False, + access_mode=constants.ACCESS_MODE_RWO, + volume_mode=constants.VOLUME_MODE_BLOCK, + ) + return pvc_obj + + +def get_all_client_clusters(): + """ + Get client cluster names of all storage consumers + + Returns: + array: names of client clusters + """ + ocp_storageconsumers = ocp.OCP( + kind=constants.STORAGECONSUMER, + namespace=config.cluster_ctx.ENV_DATA["cluster_namespace"], + ) + cluster_names = [] + storageconsumers_data = ocp_storageconsumers.get().get("items") + for storageconsumer in storageconsumers_data: + cluster_names.append(storageconsumer["status"]["client"]["clusterName"]) + return cluster_names + + +def get_storageconsumer_quota(cluster_name): + """ + Get the quota value from storageconsumer details + Args: + clustername(str): name of the client cluster + Returns" + str: quota value + """ + ocp_storageconsumers = ocp.OCP( + kind=constants.STORAGECONSUMER, + namespace=config.cluster_ctx.ENV_DATA["cluster_namespace"], + ) + storageconsumers_data = ocp_storageconsumers.get().get("items") + for storageconsumer in storageconsumers_data: + if storageconsumer["status"]["client"]["clusterName"] == cluster_name: + if "storageQuotaInGiB" not in storageconsumer["spec"]: + return "Unlimited" + return storageconsumer["spec"]["storageQuotaInGiB"] diff --git a/ocs_ci/ocs/ui/page_objects/storage_clients.py b/ocs_ci/ocs/ui/page_objects/storage_clients.py index c1f05feb2eb..a13232b0939 100644 --- a/ocs_ci/ocs/ui/page_objects/storage_clients.py +++ b/ocs_ci/ocs/ui/page_objects/storage_clients.py @@ -1,5 +1,5 @@ import logging - +from selenium.common.exceptions import WebDriverException from ocs_ci.ocs.ui.base_ui import take_screenshot, copy_dom, BaseUI logger = logging.getLogger(__name__) @@ -13,14 +13,35 @@ class StorageClients(BaseUI): def __init__(self): super().__init__() - def generate_client_onboarding_ticket(self): + def generate_client_onboarding_ticket(self, quota_value=None, 
+        """
+        pvc_name = f"pvc-quota-{percentage}"
+        if not quota:
+            quota = config.ENV_DATA["quota"]
+        quota_value = int(quota.split(" ")[0])
+        quota_units = quota.split(" ")[1]
+        pvc_size_int = quota_value * percentage // 100
+        pvc_size = f"{pvc_size_int}{quota_units}"
+        rbd_storageclass = helpers.default_storage_class(constants.CEPHBLOCKPOOL)
+        pvc_obj = helpers.create_pvc(
+            pvc_name=pvc_name,
+            sc_name=rbd_storageclass.name,
+            namespace="default",
+            size=pvc_size,
+            do_reload=False,
+            access_mode=constants.ACCESS_MODE_RWO,
+            volume_mode=constants.VOLUME_MODE_BLOCK,
+        )
+        return pvc_obj
+
+
+def get_all_client_clusters():
+    """
+    Get client cluster names of all storage consumers
+
+    Returns:
+        list: names of the client clusters
+    """
+    ocp_storageconsumers = ocp.OCP(
+        kind=constants.STORAGECONSUMER,
+        namespace=config.cluster_ctx.ENV_DATA["cluster_namespace"],
+    )
+    cluster_names = []
+    storageconsumers_data = ocp_storageconsumers.get().get("items")
+    for storageconsumer in storageconsumers_data:
+        cluster_names.append(storageconsumer["status"]["client"]["clusterName"])
+    return cluster_names
+
+
+def get_storageconsumer_quota(cluster_name):
+    """
+    Get the quota value from the storageconsumer details
+
+    Args:
+        cluster_name (str): name of the client cluster
+
+    Returns:
+        str: quota value
+    """
+    ocp_storageconsumers = ocp.OCP(
+        kind=constants.STORAGECONSUMER,
+        namespace=config.cluster_ctx.ENV_DATA["cluster_namespace"],
+    )
+    storageconsumers_data = ocp_storageconsumers.get().get("items")
+    for storageconsumer in storageconsumers_data:
+        if storageconsumer["status"]["client"]["clusterName"] == cluster_name:
+            if "storageQuotaInGiB" not in storageconsumer["spec"]:
+                return "Unlimited"
+            return storageconsumer["spec"]["storageQuotaInGiB"]
diff --git a/ocs_ci/ocs/ui/page_objects/storage_clients.py b/ocs_ci/ocs/ui/page_objects/storage_clients.py
index c1f05feb2eb..a13232b0939 100644
--- a/ocs_ci/ocs/ui/page_objects/storage_clients.py
+++ b/ocs_ci/ocs/ui/page_objects/storage_clients.py
@@ -1,5 +1,5 @@
 import logging
-
+from selenium.common.exceptions import WebDriverException
 from ocs_ci.ocs.ui.base_ui import take_screenshot, copy_dom, BaseUI
 
 logger = logging.getLogger(__name__)
@@ -13,14 +13,35 @@ class StorageClients(BaseUI):
     def __init__(self):
         super().__init__()
 
-    def generate_client_onboarding_ticket(self):
+    def generate_client_onboarding_ticket(self, quota_value=None, quota_tib=None):
         """
-        Generate a client onboarding ticket
+        Generate a client onboarding ticket.
+        Starting with version 4.17, a client quota can be specified.
+
+        Args:
+            quota_value (int): client's quota in GiB or TiB; unlimited if not defined
+            quota_tib (bool): True if the quota is in TiB, False otherwise
 
         Returns:
             str: onboarding_key
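+
+        Example (a sketch; requests a token limited to a 2 TiB quota):
+            storage_clients = PageNavigator().nav_to_storageclients_page()
+            token = storage_clients.generate_client_onboarding_ticket(
+                quota_value=2, quota_tib=True
+            )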
         """
+        logger.info("Generating onboarding ticket")
         self.do_click(self.storage_clients_loc["generate_client_onboarding_ticket"])
+        if quota_value:
+            logger.info("Setting client cluster quota")
+            self.do_click(self.storage_clients_loc["custom_quota"])
+            self.do_clear(
+                locator=self.storage_clients_loc["quota_value"],
+            )
+            self.do_send_keys(
+                locator=self.storage_clients_loc["quota_value"],
+                text=quota_value,
+            )
+            if quota_tib:
+                self.do_click(self.storage_clients_loc["choose_units"])
+                self.do_click(self.storage_clients_loc["quota_ti"])
+        logger.info("Confirming token generation")
+        self.do_click(self.storage_clients_loc["confirm_generation"])
         onboarding_key = self.get_element_text(
             self.storage_clients_loc["onboarding_key"]
         )
@@ -41,3 +62,126 @@ def close_onboarding_token_modal(self):
         Close the onboarding token modal
         """
         self.do_click(self.storage_clients_loc["close_token_modal"])
+
+    def find_client_cluster_index(self, client_cluster_name):
+        """
+        Find the index of the cluster on the Storage Clients page.
+        Filtering clients by name isn't working: https://bugzilla.redhat.com/show_bug.cgi?id=2317212
+
+        Args:
+            client_cluster_name (str): name of the hosted cluster
+
+        Returns:
+            int: index of the cluster on the Storage Clients page,
+                or None when the cluster is not found
+
+        """
+        all_names = [
+            element.text
+            for element in self.get_elements(self.storage_clients_loc["cluster_name"])
+        ]
+        for index, name in enumerate(all_names):
+            if client_cluster_name in name:
+                logger.info(f"Storage client {client_cluster_name} has index {index}")
+                return index
+        logger.error(
+            f"Storage client with cluster name {client_cluster_name} not found"
+        )
+
+    def get_client_quota_from_ui(self, client_cluster_name):
+        """
+        Get a client's quota from the Storage Clients page
+
+        Args:
+            client_cluster_name (str): name of the client cluster
+
+        Returns:
+            str: quota of the client
+        """
+        client_index = self.find_client_cluster_index(client_cluster_name)
+        quota_element = self.get_elements(self.storage_clients_loc["client_quota"])[
+            client_index
+        ]
+        return quota_element.text
+
+    def edit_quota(
+        self, client_cluster_name, new_value=None, new_units=False, increase_by_one=True
+    ):
+        """
+        Edit a client's storage quota
+
+        Args:
+            client_cluster_name (str): name of the client cluster
+            new_value (int): new value of the quota
+            new_units (bool): True if the units need to be changed, False otherwise
+            increase_by_one (bool): True if the quota needs to be increased by 1, False otherwise
+
+        Returns:
+            bool: True if the quota change was successful, False otherwise
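+
+        Example (a sketch; sets the quota of a hypothetical cluster
+        "client1" to 3 TiB):
+            storage_clients_page.edit_quota(
+                client_cluster_name="client1",
+                increase_by_one=False,
+                new_value=3,
+                new_units=True,
+            )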
+        """
+        client_index = self.find_client_cluster_index(client_cluster_name)
+        self.do_click(
+            self.get_elements(self.storage_clients_loc["client_kebab_menu"])[
+                client_index
+            ]
+        )
+        try:
+            self.do_click(self.storage_clients_loc["edit_quota"])
+        except WebDriverException as e:
+            logger.info(e)
+            logger.info("Quota changes are not possible")
+            return False
+        if increase_by_one:
+            self.do_click(self.storage_clients_loc["quota_increment"])
+            logger.info("Quota increased by 1")
+        else:
+            if not new_value:
+                logger.error("New quota value not provided")
+                return False
+            else:
+                self.clear_with_ctrl_a_del(self.storage_clients_loc["new_quota"])
+                self.do_send_keys(self.storage_clients_loc["new_quota"], text=new_value)
+                logger.info(f"Quota value changed to {new_value}")
+        if new_units:
+            self.do_click(self.storage_clients_loc["unit_change_button"])
+            self.do_click(self.storage_clients_loc["units_ti"])
+            logger.info("Quota units changed to Ti")
+        try:
+            self.do_click(self.storage_clients_loc["confirm_quota_change"])
+            logger.info("Quota changes saved")
+            return True
+        except WebDriverException as e:
+            logger.info(e)
+            logger.info("Quota changes could not be saved")
+            return False
+
+    def get_available_storage_from_quota_edit_popup(self):
+        """
+        Get the value of available storage from the Edit quota popup
+
+        Returns:
+            str: available storage
+        """
+        self.do_click(
+            self.get_elements(self.storage_clients_loc["client_kebab_menu"])[0]
+        )
+        av_capacity_text = self.get_element_text(
+            self.storage_clients_loc["available_storage"]
+        )
+        # Text is expected to be 'Available capacity (ocs-storagecluster): N TiB'
+        split_capacity_text = av_capacity_text.split(" ")
+        return f"{split_capacity_text[-2]} {split_capacity_text[-1]}"
+
+    def validate_unlimited_quota_utilization_info(self):
+        """
+        Verify that for every client with unlimited quota,
+        the utilization column only shows "-"
+        """
+        quota_elements = self.get_elements(self.storage_clients_loc["client_quota"])
+        utilization_elements = self.get_elements(
+            self.storage_clients_loc["quota_utilization"]
+        )
+        for i in range(len(quota_elements)):
+            if quota_elements[i].text == "Unlimited":
+                assert (
+                    utilization_elements[i].text == "-"
+                ), f"Quota utilization is shown as {utilization_elements[i].text}"
diff --git a/ocs_ci/ocs/ui/views.py b/ocs_ci/ocs/ui/views.py
index 4e20cf3792a..8ccb72cd5ca 100644
--- a/ocs_ci/ocs/ui/views.py
+++ b/ocs_ci/ocs/ui/views.py
@@ -668,7 +668,32 @@
         "//div[@class='odf-onboarding-modal__text-area']",
         By.XPATH,
     ),
+    "quota_value": ("//input[@type='number']", By.XPATH),
+    "choose_units": (
+        "//div[contains(@class, 'request-size-input__unit')]/button",
+        By.XPATH,
+    ),
+    "quota_ti": ("//li[@id='Ti']", By.XPATH),
+    "confirm_generation": ("//button[@data-test-id='confirm-action']", By.XPATH),
     "close_token_modal": ("//button[@aria-label='Close']", By.XPATH),
+    "client_name": ("name", By.ID),
+    "cluster_name": ("clusterName", By.ID),
+    "client_quota": ("storageQuota", By.ID),
+    "custom_quota": ("storage-quota-custom", By.ID),
+    "client_kebab_menu": ("//button[@data-test='kebab-button']", By.XPATH),
+    "edit_quota": ("Edit Resource", By.ID),
+    "quota_decrement": ("button[aria-label='Decrement']", By.CSS_SELECTOR),
+    "quota_increment": ("button[aria-label='Increment']", By.CSS_SELECTOR),
+    "new_quota": ("input[type=number]", By.CSS_SELECTOR),
+    "unit_change_button": (
+        "//div[@class='pf-v5-c-dropdown request-size-input__unit']/button",
+        By.XPATH,
+    ),
+    "units_ti": ("li[id=Ti]", By.CSS_SELECTOR),
+    "storage_available": ("//span[@data-test='status-text']", By.XPATH),
+    "quota_decreased_alert": ("//h4[@class='pf-v5-c-alert__title']", By.XPATH),
+    "confirm_quota_change": ("//button[@data-test-id='confirm-action']", By.XPATH),
+    "available_storage": (),
 }
 
 page_nav = {
diff --git a/tests/functional/monitoring/conftest.py b/tests/functional/monitoring/conftest.py
index 33aacc2ab03..4f3405ef607 100644
--- a/tests/functional/monitoring/conftest.py
+++ b/tests/functional/monitoring/conftest.py
@@ -1209,3 +1209,62 @@ def teardown():
     teardown()
 
     return measured_op
+
+
+@pytest.fixture
+def measure_fill_up_client_quota(
+    request,
+    measurement_dir,
+    threading_lock,
+):
+    """
+    Create a PVC on the client cluster where quota is restricted
+    to reach 80% of the quota, measure the time when it was created and
+    the alerts that were triggered during this event.
+
+    Returns:
+        dict: Contains information about `start` and `stop` time
+        for creating and then deleting the PVC
+    """
+    logger.info("Switch to client cluster with restricted quota")
+    with config.RunWithRestrictedQuotaConsumerConfigContextIfAvailable():
+        client_cluster = config.cluster_ctx.MULTICLUSTER["multicluster_index"]
+        logger.info(f"Client cluster key: {client_cluster}")
+        cluster_id = exec_cmd(
+            "oc get clusterversion version -o jsonpath='{.spec.clusterID}'"
+        ).stdout.decode("utf-8")
+        client_name = f"storageconsumer-{cluster_id}"
+        client = storageconsumer.StorageConsumer(
+            client_name, consumer_context=client_cluster
+        )
+        pvc = None
+
+        def use_up_quota_80_percent():
+            nonlocal pvc
+            nonlocal client
+            quota = config.ENV_DATA["quota"]
+            pvc = client.fill_up_quota_percentage(percentage=80, quota=quota)
+            # keep the quota filled up long enough for alerts to trigger
+            run_time = 60 * 3
+            logger.info(f"Waiting for {run_time} seconds")
+            time.sleep(run_time)
+            return
+
+        def teardown():
+            nonlocal pvc
+            with config.RunWithRestrictedQuotaConsumerConfigContextIfAvailable():
+                pvc.delete()
+                pvc.ocp.wait_for_delete(resource_name=pvc.name, timeout=180)
+
+        request.addfinalizer(teardown)
+
+        test_file = os.path.join(
+            measurement_dir, "measure_fill_up_client_quota.json"
+        )
+        measured_op = measure_operation(
+            use_up_quota_80_percent,
+            test_file,
+            threading_lock=threading_lock,
+            metadata={"client_name": client_name},
+        )
+
+    return measured_op
diff --git a/tests/functional/monitoring/prometheus/alerts/test_provider_client.py b/tests/functional/monitoring/prometheus/alerts/test_provider_client.py
index 9a6c42b4fff..1a9e1b237b1 100644
--- a/tests/functional/monitoring/prometheus/alerts/test_provider_client.py
+++ b/tests/functional/monitoring/prometheus/alerts/test_provider_client.py
@@ -89,6 +89,34 @@ def test_change_client_ocs_version_and_stop_heartbeat(
     )
 
 
+@blue_squad
+@tier4c
+@runs_on_provider
+@hci_provider_and_client_required
+def test_quota_fillup_80_alert(measure_fill_up_client_quota, threading_lock):
+    """
+    Test that the alerts triggered by filling up 80% of the client quota
+    are firing and that they are cleared after the PVC is deleted.
+    """
+    api = prometheus.PrometheusAPI(threading_lock=threading_lock)
+
+    # get alerts from the time when 80% of the client quota was used up
+    alerts = measure_fill_up_client_quota.get("prometheus_alerts")
+    # client_name = measure_fill_up_client_quota.get("metadata").get("client_name")
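+    # The expected alerts are not defined yet; each element should carry
+    # "label", "msg" and "severity" keys, e.g. (hypothetical values):
+    #   {"label": "StorageQuotaNearFull", "msg": "...", "severity": "warning"}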
+    target_alerts = []
+    states = ["firing"]
+
+    for target_alert in target_alerts:
+        prometheus.check_alert_list(
+            label=target_alert["label"],
+            msg=target_alert["msg"],
+            alerts=alerts,
+            states=states,
+            severity=target_alert["severity"],
+        )
+        api.check_alert_cleared(
+            label=target_alert["label"],
+            measure_end_time=measure_fill_up_client_quota.get("stop"),
+            time_min=300,
+        )
+
+
 def teardown_module():
     ocs_obj = OCP()
     ocs_obj.login_as_sa()
diff --git a/tests/functional/ui/test_provider_client.py b/tests/functional/ui/test_provider_client.py
new file mode 100644
index 00000000000..b8fc176f0da
--- /dev/null
+++ b/tests/functional/ui/test_provider_client.py
@@ -0,0 +1,118 @@
+import logging
+
+import ocs_ci.ocs.resources.pod as pod
+from ocs_ci.framework.testlib import (
+    skipif_ocs_version,
+    ManageTest,
+    tier1,
+    skipif_ocp_version,
+    runs_on_provider,
+    black_squad,
+    hci_provider_required,
+)
+from ocs_ci.ocs.resources import storageconsumer
+from ocs_ci.ocs.ui.page_objects.page_navigator import PageNavigator
+
+
+logger = logging.getLogger(__name__)
+
+
+@tier1
+@black_squad
+@skipif_ocs_version("<4.17")
+@skipif_ocp_version("<4.17")
+@runs_on_provider
+@hci_provider_required
+class TestOnboardingTokenGenerationWithQuota(ManageTest):
+    """
+    Test onboarding token generation when quota is specified
+    """
+
+    def test_token_generation_with_quota(
+        self, setup_ui_class, quota_value=2, quota_tib=True
+    ):
+        """
+        Test that an onboarding token can be generated when a quota
+        is specified
+        """
+        storage_clients = PageNavigator().nav_to_storageclients_page()
+        token = storage_clients.generate_client_onboarding_ticket(
+            quota_value=quota_value, quota_tib=quota_tib
+        )
+        logger.info(f"Token generated. It begins with {token[:20]}")
+        assert len(token) > 20, "Token is too short"
+
+    def test_quota_decrease_blocked(self, setup_ui_class):
+        """
+        Test that quota cannot be decreased for a client:
+        if a client has unlimited quota, it cannot be changed;
+        if a client has limited quota, the new value cannot be lower
+        """
+        storage_clients_page = PageNavigator().nav_to_storageclients_page()
+        client_clusters = storageconsumer.get_all_client_clusters()
+        for client in client_clusters:
+            quota = storage_clients_page.get_client_quota_from_ui(client)
+            if quota == "Unlimited":
+                assert not storage_clients_page.edit_quota(
+                    client_cluster_name=client, increase_by_one=True
+                )
+            else:
+                new_quota = int(quota) - 1
+                assert not storage_clients_page.edit_quota(
+                    client_cluster_name=client,
+                    increase_by_one=False,
+                    new_value=new_quota,
+                )
+
+    def test_quota_increase(self, setup_ui_class):
+        """
+        Test that quota can be increased in the UI for every client with
+        limited quota, both by manually setting a new value and by
+        clicking Increment
+        """
+        storage_clients_page = PageNavigator().nav_to_storageclients_page()
+        client_clusters = storageconsumer.get_all_client_clusters()
+        for client in client_clusters:
+            quota = storage_clients_page.get_client_quota_from_ui(client)
+            if quota != "Unlimited":
+                new_quota = int(quota) + 1
+                assert storage_clients_page.edit_quota(
+                    client_cluster_name=client,
+                    increase_by_one=False,
+                    new_value=new_quota,
+                )
+                assert storage_clients_page.edit_quota(
+                    client_cluster_name=client, increase_by_one=True
+                )
+
+    def test_available_capacity_in_quota_edit_popup(self, setup_ui_class):
+        """
+        Test that the Quota edit popup shows the correct value of
+        available capacity
+        """
+        storage_clients_page = PageNavigator().nav_to_storageclients_page()
+        ceph_pod = pod.get_ceph_tools_pod()
+        ceph_status = ceph_pod.exec_ceph_cmd(ceph_cmd="ceph df")
+        ceph_capacity_bytes = ceph_status["stats"]["total_avail_bytes"]
+        ui_capacity = storage_clients_page.get_available_storage_from_quota_edit_popup()
+        if "TiB" in ui_capacity:
+            ui_capacity_num = float(ui_capacity.split(" ")[0])
+            ceph_capacity_tib = ceph_capacity_bytes / 2**40
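+            # the UI rounds the capacity, so allow a small tolerance:
+            # a squared difference below 0.1 keeps the two values within
+            # roughly 0.3 TiB of each other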
+            assert (ui_capacity_num - ceph_capacity_tib) ** 2 < 0.1
+
+    def test_quota_values_in_ui(self, setup_ui_class):
+        """
+        Test that all storage clients have the correct quota value in the UI
+        """
+        storage_clients_page = PageNavigator().nav_to_storageclients_page()
+        client_clusters = storageconsumer.get_all_client_clusters()
+        for client in client_clusters:
+            quota_ui = storage_clients_page.get_client_quota_from_ui(client)
+            quota_cli = storageconsumer.get_storageconsumer_quota(client)
+            assert (
+                quota_ui == quota_cli
+            ), f"Quota in the UI: {quota_ui}, quota in the CLI: {quota_cli}"
+
+    def test_usage_for_unlimited_quota_clients(self, setup_ui_class):
+        """
+        Test that clients with unlimited storage don't have
+        quota usage shown on the Clients page
+        """
+        storage_clients_page = PageNavigator().nav_to_storageclients_page()
+        storage_clients_page.validate_unlimited_quota_utilization_info()